language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__maven
|
impl/maven-impl/src/main/java/org/apache/maven/impl/resolver/MavenSessionBuilderSupplier.java
|
{
"start": 3451,
"end": 8015
}
|
class ____ implements Supplier<SessionBuilder> {
protected final RepositorySystem repositorySystem;
protected final boolean mavenMaven3Personality;
protected final InternalScopeManager scopeManager;
public MavenSessionBuilderSupplier(RepositorySystem repositorySystem, boolean mavenMaven3Personality) {
this.repositorySystem = requireNonNull(repositorySystem);
this.mavenMaven3Personality = mavenMaven3Personality;
this.scopeManager = new ScopeManagerImpl(
mavenMaven3Personality
? Maven3ScopeManagerConfiguration.INSTANCE
: Maven4ScopeManagerConfiguration.INSTANCE);
}
protected DependencyTraverser getDependencyTraverser() {
return new FatArtifactTraverser();
}
protected InternalScopeManager getScopeManager() {
return scopeManager;
}
protected DependencyManager getDependencyManager() {
return getDependencyManager(!mavenMaven3Personality);
}
public DependencyManager getDependencyManager(boolean transitive) {
if (transitive) {
return new TransitiveDependencyManager(getScopeManager());
}
return new ClassicDependencyManager(getScopeManager());
}
protected DependencySelector getDependencySelector() {
return new AndDependencySelector(
ScopeDependencySelector.legacy(
null, Arrays.asList(DependencyScope.TEST.id(), DependencyScope.PROVIDED.id())),
OptionalDependencySelector.fromDirect(),
new ExclusionDependencySelector());
}
protected DependencyGraphTransformer getDependencyGraphTransformer() {
return new ChainedDependencyGraphTransformer(
new ConflictResolver(
new ConfigurableVersionSelector(), new ManagedScopeSelector(getScopeManager()),
new SimpleOptionalitySelector(), new ManagedScopeDeriver(getScopeManager())),
new ManagedDependencyContextRefiner(getScopeManager()));
}
/**
* This method produces "surrogate" type registry that is static: it aims users that want to use
* Maven-Resolver without involving Maven Core and related things.
* <p>
* This type registry is NOT used by Maven Core: Maven replaces it during Session creation with a type registry
* that supports extending it (i.e. via Maven Extensions).
* <p>
* Important: this "static" list of types should be in-sync with core provided types.
*/
protected ArtifactTypeRegistry getArtifactTypeRegistry() {
DefaultArtifactTypeRegistry stereotypes = new DefaultArtifactTypeRegistry();
new DefaultTypeProvider().types().forEach(stereotypes::add);
return stereotypes;
}
protected ArtifactDescriptorPolicy getArtifactDescriptorPolicy() {
return new SimpleArtifactDescriptorPolicy(true, true);
}
protected void configureSessionBuilder(SessionBuilder session) {
session.setDependencyTraverser(getDependencyTraverser());
session.setDependencyManager(getDependencyManager());
session.setDependencySelector(getDependencySelector());
session.setDependencyGraphTransformer(getDependencyGraphTransformer());
session.setArtifactTypeRegistry(getArtifactTypeRegistry());
session.setArtifactDescriptorPolicy(getArtifactDescriptorPolicy());
session.setScopeManager(getScopeManager());
}
/**
* Creates a new Maven-like repository system session by initializing the session with values typical for
* Maven-based resolution. In more detail, this method configures settings relevant for the processing of dependency
* graphs, most other settings remain at their generic default value. Use the various setters to further configure
* the session with authentication, mirror, proxy and other information required for your environment. At least,
* local repository manager needs to be configured to make session be able to create session instance.
*
* @return SessionBuilder configured with minimally required things for "Maven-based resolution". At least LRM must
* be set on builder to make it able to create session instances.
*/
@Override
public SessionBuilder get() {
requireNonNull(repositorySystem, "repositorySystem");
SessionBuilder builder = repositorySystem.createSessionBuilder();
configureSessionBuilder(builder);
return builder;
}
}
|
MavenSessionBuilderSupplier
|
java
|
apache__rocketmq
|
tools/src/test/java/org/apache/rocketmq/tools/command/connection/ProducerConnectionSubCommandTest.java
|
{
"start": 1508,
"end": 3021
}
|
class ____ {
private ServerResponseMocker brokerMocker;
private ServerResponseMocker nameServerMocker;
@Before
public void before() {
brokerMocker = startOneBroker();
nameServerMocker = NameServerMocker.startByDefaultConf(brokerMocker.listenPort());
}
@After
public void after() {
brokerMocker.shutdown();
nameServerMocker.shutdown();
}
@Test
public void testExecute() throws SubCommandException {
ProducerConnectionSubCommand cmd = new ProducerConnectionSubCommand();
Options options = ServerUtil.buildCommandlineOptions(new Options());
String[] subargs = new String[] {"-g default-producer-group", "-t unit-test", String.format("-n localhost:%d", nameServerMocker.listenPort())};
final CommandLine commandLine =
ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs,
cmd.buildCommandlineOptions(options), new DefaultParser());
cmd.execute(commandLine, options, null);
}
private ServerResponseMocker startOneBroker() {
ProducerConnection producerConnection = new ProducerConnection();
HashSet<Connection> connectionSet = new HashSet<>();
Connection connection = mock(Connection.class);
connectionSet.add(connection);
producerConnection.setConnectionSet(connectionSet);
// start broker
return ServerResponseMocker.startServer(producerConnection.encode());
}
}
|
ProducerConnectionSubCommandTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ThreadJoinLoopTest.java
|
{
"start": 6517,
"end": 7680
}
|
class ____ extends Thread {
public void run() {
try {
// BUG: Diagnostic contains: Uninterruptibles.joinUninterruptibly(this)
join();
} catch (InterruptedException e) {
// ignore
}
}
public void whileInThread() {
while (isAlive()) {
try {
// BUG: Diagnostic contains: Uninterruptibles.joinUninterruptibly(this)
join();
} catch (InterruptedException e) {
// Ignore.
}
}
}
}
}\
""")
.doTest();
}
@Test
public void negativeCases() {
compilationTestHelper
.addSourceLines(
"ThreadJoinLoopNegativeCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import java.util.List;
/**
* @author mariasam@google.com (Maria Sam) on 7/10/17.
*/
public
|
MyThread
|
java
|
apache__maven
|
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng3183LoggingToFileTest.java
|
{
"start": 1451,
"end": 2693
}
|
class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that the CLI parameter -l can be used to direct logging to a file.
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-3183");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.addCliArgument("-l");
verifier.addCliArgument("maven.log");
verifier.setLogFileName("stdout.txt");
new File(testDir, "stdout.txt").delete();
new File(testDir, "maven.log").delete();
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
List<String> stdout = verifier.loadLines("stdout.txt");
for (Iterator<String> it = stdout.iterator(); it.hasNext(); ) {
String line = it.next();
if (line.startsWith("+") || line.startsWith("EMMA")) {
it.remove();
}
}
assertEquals(Collections.EMPTY_LIST, stdout);
List<String> log = verifier.loadLines("maven.log");
assertFalse(log.isEmpty());
}
}
|
MavenITmng3183LoggingToFileTest
|
java
|
netty__netty
|
codec-http3/src/main/java/io/netty/handler/codec/http3/QpackStaticTable.java
|
{
"start": 850,
"end": 9767
}
|
class ____ {
static final int NOT_FOUND = -1;
/**
* Special mask used to disambiguate exact pair index from
* name only index and avoid executing lookup twice. Supposed
* to be used internally. The value should be large enough
* not to override bits from static table index (current size
* of the table is 99 elements).
*/
static final int MASK_NAME_REF = 1 << 10;
/**
* <a href="https://www.rfc-editor.org/rfc/rfc9204.html#name-static-table-2>Appendix A: Static Table</a>
*/
private static final List<QpackHeaderField> STATIC_TABLE = Arrays.asList(
newEmptyHeaderField(":authority"),
newHeaderField(":path", "/"),
newHeaderField("age", "0"),
newEmptyHeaderField("content-disposition"),
newHeaderField("content-length", "0"),
newEmptyHeaderField("cookie"),
newEmptyHeaderField("date"),
newEmptyHeaderField("etag"),
newEmptyHeaderField("if-modified-since"),
newEmptyHeaderField("if-none-match"),
newEmptyHeaderField("last-modified"),
newEmptyHeaderField("link"),
newEmptyHeaderField("location"),
newEmptyHeaderField("referer"),
newEmptyHeaderField("set-cookie"),
newHeaderField(":method", "CONNECT"),
newHeaderField(":method", "DELETE"),
newHeaderField(":method", "GET"),
newHeaderField(":method", "HEAD"),
newHeaderField(":method", "OPTIONS"),
newHeaderField(":method", "POST"),
newHeaderField(":method", "PUT"),
newHeaderField(":scheme", "http"),
newHeaderField(":scheme", "https"),
newHeaderField(":status", "103"),
newHeaderField(":status", "200"),
newHeaderField(":status", "304"),
newHeaderField(":status", "404"),
newHeaderField(":status", "503"),
newHeaderField("accept", "*/*"),
newHeaderField("accept", "application/dns-message"),
newHeaderField("accept-encoding", "gzip, deflate, br"),
newHeaderField("accept-ranges", "bytes"),
newHeaderField("access-control-allow-headers", "cache-control"),
newHeaderField("access-control-allow-headers", "content-type"),
newHeaderField("access-control-allow-origin", "*"),
newHeaderField("cache-control", "max-age=0"),
newHeaderField("cache-control", "max-age=2592000"),
newHeaderField("cache-control", "max-age=604800"),
newHeaderField("cache-control", "no-cache"),
newHeaderField("cache-control", "no-store"),
newHeaderField("cache-control", "public, max-age=31536000"),
newHeaderField("content-encoding", "br"),
newHeaderField("content-encoding", "gzip"),
newHeaderField("content-type", "application/dns-message"),
newHeaderField("content-type", "application/javascript"),
newHeaderField("content-type", "application/json"),
newHeaderField("content-type", "application/x-www-form-urlencoded"),
newHeaderField("content-type", "image/gif"),
newHeaderField("content-type", "image/jpeg"),
newHeaderField("content-type", "image/png"),
newHeaderField("content-type", "text/css"),
newHeaderField("content-type", "text/html;charset=utf-8"),
newHeaderField("content-type", "text/plain"),
newHeaderField("content-type", "text/plain;charset=utf-8"),
newHeaderField("range", "bytes=0-"),
newHeaderField("strict-transport-security", "max-age=31536000"),
newHeaderField("strict-transport-security", "max-age=31536000;includesubdomains"),
newHeaderField("strict-transport-security", "max-age=31536000;includesubdomains;preload"),
newHeaderField("vary", "accept-encoding"),
newHeaderField("vary", "origin"),
newHeaderField("x-content-type-options", "nosniff"),
newHeaderField("x-xss-protection", "1; mode=block"),
newHeaderField(":status", "100"),
newHeaderField(":status", "204"),
newHeaderField(":status", "206"),
newHeaderField(":status", "302"),
newHeaderField(":status", "400"),
newHeaderField(":status", "403"),
newHeaderField(":status", "421"),
newHeaderField(":status", "425"),
newHeaderField(":status", "500"),
newEmptyHeaderField("accept-language"),
newHeaderField("access-control-allow-credentials", "FALSE"),
newHeaderField("access-control-allow-credentials", "TRUE"),
newHeaderField("access-control-allow-headers", "*"),
newHeaderField("access-control-allow-methods", "get"),
newHeaderField("access-control-allow-methods", "get, post, options"),
newHeaderField("access-control-allow-methods", "options"),
newHeaderField("access-control-expose-headers", "content-length"),
newHeaderField("access-control-request-headers", "content-type"),
newHeaderField("access-control-request-method", "get"),
newHeaderField("access-control-request-method", "post"),
newHeaderField("alt-svc", "clear"),
newEmptyHeaderField("authorization"),
newHeaderField("content-security-policy", "script-src 'none';object-src 'none';base-uri 'none'"),
newHeaderField("early-data", "1"),
newEmptyHeaderField("expect-ct"),
newEmptyHeaderField("forwarded"),
newEmptyHeaderField("if-range"),
newEmptyHeaderField("origin"),
newHeaderField("purpose", "prefetch"),
newEmptyHeaderField("server"),
newHeaderField("timing-allow-origin", "*"),
newHeaderField("upgrade-insecure-requests", "1"),
newEmptyHeaderField("user-agent"),
newEmptyHeaderField("x-forwarded-for"),
newHeaderField("x-frame-options", "deny"),
newHeaderField("x-frame-options", "sameorigin"));
/**
* The number of header fields in the static table.
*/
static final int length = STATIC_TABLE.size();
private static final CharSequenceMap<List<Integer>> STATIC_INDEX_BY_NAME = createMap(length);
private static QpackHeaderField newEmptyHeaderField(String name) {
return new QpackHeaderField(AsciiString.cached(name), AsciiString.EMPTY_STRING);
}
private static QpackHeaderField newHeaderField(String name, String value) {
return new QpackHeaderField(AsciiString.cached(name), AsciiString.cached(value));
}
/**
* Return the header field at the given index value.
* Note that QPACK uses 0-based indexing when HPACK is using 1-based.
*/
static QpackHeaderField getField(int index) {
return STATIC_TABLE.get(index);
}
/**
* Returns the lowest index value for the given header field name in the static
* table. Returns -1 if the header field name is not in the static table.
*/
static int getIndex(CharSequence name) {
List<Integer> index = STATIC_INDEX_BY_NAME.get(name);
if (index == null) {
return NOT_FOUND;
}
return index.get(0);
}
/**
* Returns:
* a) the index value for the given header field in the static table (when found);
* b) the index value for a given name with a single bit masked (no exact match);
* c) -1 if name was not found in the static table.
*/
static int findFieldIndex(CharSequence name, CharSequence value) {
final List<Integer> nameIndex = STATIC_INDEX_BY_NAME.get(name);
// Early return if name not found in the table.
if (nameIndex == null) {
return NOT_FOUND;
}
// If name was found, check all subsequence elements of the table for exact match.
for (int index: nameIndex) {
QpackHeaderField field = STATIC_TABLE.get(index);
if (QpackUtil.equalsVariableTime(value, field.value)) {
return index;
}
}
// No exact match was found but we still can reference the name.
return nameIndex.get(0) | MASK_NAME_REF;
}
/**
* Creates a map CharSequenceMap header name to index value to allow quick lookup.
*/
@SuppressWarnings("unchecked")
private static CharSequenceMap<List<Integer>> createMap(int length) {
CharSequenceMap<List<Integer>> mapping =
new CharSequenceMap<List<Integer>>(true, UnsupportedValueConverter.<List<Integer>>instance(), length);
for (int index = 0; index < length; index++) {
final QpackHeaderField field = getField(index);
final List<Integer> cursor = mapping.get(field.name);
if (cursor == null) {
final List<Integer> holder = new ArrayList<>(16);
holder.add(index);
mapping.set(field.name, holder);
} else {
cursor.add(index);
}
}
return mapping;
}
private QpackStaticTable() {
}
}
|
QpackStaticTable
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/descriptor/DisplayNameUtilsTests.java
|
{
"start": 6706,
"end": 6820
}
|
class ____ {
}
@DisplayNameGeneration(value = DisplayNameGenerator.Standard.class)
static
|
BlankDisplayNameTestCase
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/BitLength.java
|
{
"start": 1466,
"end": 3589
}
|
class ____ extends UnaryScalarFunction {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
Expression.class,
"BitLength",
BitLength::new
);
@FunctionInfo(
returnType = "integer",
description = "Returns the bit length of a string.",
note = "All strings are in UTF-8, so a single character can use multiple bytes.",
examples = @Example(file = "docs", tag = "bitLength")
)
public BitLength(
Source source,
@Param(
name = "string",
type = { "keyword", "text" },
description = "String expression. If `null`, the function returns `null`."
) Expression field
) {
super(source, field);
}
private BitLength(StreamInput in) throws IOException {
this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
source().writeTo(out);
out.writeNamedWriteable(field());
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
public DataType dataType() {
return DataType.INTEGER;
}
@Override
protected TypeResolution resolveType() {
return childrenResolved() == false ? new TypeResolution("Unresolved children") : isString(field(), sourceText(), DEFAULT);
}
@Evaluator(warnExceptions = { ArithmeticException.class })
static int process(BytesRef val) {
return Math.multiplyExact(val.length, Byte.SIZE);
}
@Override
public Expression replaceChildren(List<Expression> newChildren) {
return new BitLength(source(), newChildren.get(0));
}
@Override
protected NodeInfo<? extends Expression> info() {
return NodeInfo.create(this, BitLength::new, field());
}
@Override
public EvalOperator.ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) {
return new BitLengthEvaluator.Factory(source(), toEvaluator.apply(field()));
}
}
|
BitLength
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/search/rank/MockedRequestActionBasedRerankerIT.java
|
{
"start": 6427,
"end": 7295
}
|
class ____ extends LegacyActionRequest {
private final List<String> docFeatures;
public TestRerankingActionRequest(List<String> docFeatures) {
super();
this.docFeatures = docFeatures;
}
public TestRerankingActionRequest(StreamInput in) throws IOException {
super(in);
this.docFeatures = in.readCollectionAsList(StreamInput::readString);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeCollection(docFeatures, StreamOutput::writeString);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
public boolean shouldFail() {
return false;
}
}
public static
|
TestRerankingActionRequest
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/ast/statement/SQLAlterSystemSetConfigStatement.java
|
{
"start": 825,
"end": 1384
}
|
class ____ extends SQLStatementImpl implements SQLAlterStatement {
private List<SQLAssignItem> options = new ArrayList<SQLAssignItem>();
public List<SQLAssignItem> getOptions() {
return options;
}
public void addOption(SQLAssignItem item) {
item.setParent(this);
this.options.add(item);
}
@Override
protected void accept0(SQLASTVisitor visitor) {
if (visitor.visit(this)) {
acceptChild(visitor, options);
}
visitor.endVisit(this);
}
}
|
SQLAlterSystemSetConfigStatement
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/proxy/inlinedirtychecking/SimpleDynamicUpdateTest.java
|
{
"start": 6519,
"end": 7026
}
|
class ____ {
@Id
private Integer id;
@NotNull
private String name;
private String description;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
}
@Embeddable
public static
|
Role
|
java
|
apache__flink
|
flink-state-backends/flink-statebackend-changelog/src/test/java/org/apache/flink/state/changelog/TestChangeLoggerKv.java
|
{
"start": 1149,
"end": 4282
}
|
class ____<State> implements KvStateChangeLogger<State, String> {
boolean stateUpdated;
boolean stateUpdatedInternal;
boolean stateAdded;
boolean stateCleared;
boolean stateElementAdded;
boolean stateElementChanged;
boolean stateElementRemoved;
boolean stateMerged;
final BiFunction<State, State, State> stateAggregator;
State state;
public static <T> TestChangeLoggerKv<List<T>> forList(List<T> data) {
return new TestChangeLoggerKv<>(
data,
(a, b) -> {
ArrayList<T> c = new ArrayList<>();
if (a != null) {
c.addAll(a);
}
if (b != null) {
c.addAll(b);
}
return c;
});
}
public static <K, V> TestChangeLoggerKv<Map<K, V>> forMap(Map<K, V> data) {
return new TestChangeLoggerKv<>(
data,
(a, b) -> {
HashMap<K, V> c = new HashMap<>();
if (a != null) {
c.putAll(a);
}
if (b != null) {
c.putAll(b);
}
return c;
});
}
TestChangeLoggerKv(State initState, BiFunction<State, State, State> stateAggregator) {
this.stateAggregator = stateAggregator;
this.state = initState;
}
@Override
public void valueUpdated(State newState, String ns) {
stateUpdated = true;
state = newState;
}
@Override
public void valueUpdatedInternal(State newState, String ns) {
stateUpdatedInternal = true;
state = newState;
}
@Override
public void valueAdded(State addedState, String ns) {
stateAdded = true;
state = stateAggregator.apply(state, addedState);
}
@Override
public void valueCleared(String ns) {
stateCleared = true;
state = null;
}
@Override
public void valueElementAdded(
ThrowingConsumer<DataOutputView, IOException> dataSerializer, String ns) {
stateElementAdded = true;
}
@Override
public void valueElementAddedOrUpdated(
ThrowingConsumer<DataOutputView, IOException> dataSerializer, String ns) {
stateElementChanged = true;
}
@Override
public void valueElementRemoved(
ThrowingConsumer<DataOutputView, IOException> dataSerializer, String ns) {
stateElementRemoved = true;
}
@Override
public void resetWritingMetaFlag() {}
@Override
public void namespacesMerged(String target, Collection<String> sources) {
stateMerged = true;
}
public boolean anythingChanged() {
return stateUpdated
|| stateUpdatedInternal
|| stateAdded
|| stateCleared
|| stateElementChanged
|| stateElementRemoved
|| stateMerged;
}
@Override
public void close() {}
}
|
TestChangeLoggerKv
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java
|
{
"start": 3904,
"end": 5196
}
|
class ____ {
private static int MAX_RETRIES = 10;
private RMApplicationHistoryWriter writer;
private ApplicationHistoryStore store;
private List<CounterDispatcher> dispatchers =
new ArrayList<CounterDispatcher>();
@BeforeEach
public void setup() {
store = new MemoryApplicationHistoryStore();
Configuration conf = new Configuration();
conf.setBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED, true);
conf.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
writer = new RMApplicationHistoryWriter() {
@Override
protected ApplicationHistoryStore createApplicationHistoryStore(
Configuration conf) {
return store;
}
@Override
protected Dispatcher createDispatcher(Configuration conf) {
MultiThreadedDispatcher dispatcher =
new MultiThreadedDispatcher(
conf
.getInt(
YarnConfiguration.RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE,
YarnConfiguration.DEFAULT_RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE));
dispatcher.setDrainEventsOnStop();
return dispatcher;
}
|
TestRMApplicationHistoryWriter
|
java
|
resilience4j__resilience4j
|
resilience4j-micrometer/src/main/java/io/github/resilience4j/micrometer/tagged/TaggedRateLimiterMetricsPublisher.java
|
{
"start": 875,
"end": 1679
}
|
class ____
extends AbstractRateLimiterMetrics implements MetricsPublisher<RateLimiter> {
private final MeterRegistry meterRegistry;
public TaggedRateLimiterMetricsPublisher(MeterRegistry meterRegistry) {
super(RateLimiterMetricNames.ofDefaults());
this.meterRegistry = requireNonNull(meterRegistry);
}
public TaggedRateLimiterMetricsPublisher(RateLimiterMetricNames names, MeterRegistry meterRegistry) {
super(names);
this.meterRegistry = requireNonNull(meterRegistry);
}
@Override
public void publishMetrics(RateLimiter entry) {
addMetrics(meterRegistry, entry);
}
@Override
public void removeMetrics(RateLimiter entry) {
removeMetrics(meterRegistry, entry.getName());
}
}
|
TaggedRateLimiterMetricsPublisher
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
|
{
"start": 9258,
"end": 9771
}
|
class ____ rendering
*/
@Test
public void testConfiguration() {
appController.conf();
assertEquals(JobConfPage.class, appController.getClazz());
}
/**
* Test downloadConf request handling.
*/
@Test
public void testDownloadConfiguration() {
appController.downloadConf();
String jobConfXml = appController.getData();
assertTrue(!jobConfXml.contains("Error"), "Error downloading the job configuration file.");
}
/**
* Test method 'conf'. Should set AttemptsPage
|
for
|
java
|
square__retrofit
|
retrofit/java-test/src/test/java/retrofit2/RequestFactoryTest.java
|
{
"start": 6898,
"end": 7336
}
|
class ____ {
Call<ResponseBody> method() {
return null;
}
}
try {
buildRequest(Example.class);
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
.isEqualTo(
"HTTP method annotation is required (e.g., @GET, @POST, etc.).\n for method Example.method");
}
}
@Test
public void implicitMultipartForbidden() {
|
Example
|
java
|
apache__camel
|
core/camel-main/src/test/java/org/apache/camel/main/MainIoCTest.java
|
{
"start": 1517,
"end": 1626
}
|
class ____ {
@Test
public void testMainIoC() throws Exception {
// use configuration
|
MainIoCTest
|
java
|
apache__camel
|
components/camel-wasm/src/main/java/org/apache/camel/wasm/WasmFunction.java
|
{
"start": 1099,
"end": 3609
}
|
class ____ implements AutoCloseable {
private final Lock lock;
private final WasmModule module;
private final String functionName;
private final Instance instance;
private final ExportFunction function;
private final ExportFunction alloc;
private final ExportFunction dealloc;
public WasmFunction(WasmModule module, String functionName) {
this.lock = new ReentrantLock();
this.module = Objects.requireNonNull(module);
this.functionName = Objects.requireNonNull(functionName);
this.instance = Instance.builder(this.module).build();
this.function = this.instance.export(this.functionName);
this.alloc = this.instance.export(Wasm.FN_ALLOC);
this.dealloc = this.instance.export(Wasm.FN_DEALLOC);
}
public byte[] run(byte[] in) throws Exception {
Objects.requireNonNull(in);
int inPtr = -1;
int inSize = in.length;
int outPtr = -1;
int outSize = 0;
//
// Wasm execution is not thread safe so we must put a
// synchronization guard around the function execution
//
lock.lock();
try {
try {
inPtr = (int) alloc.apply(inSize)[0];
instance.memory().write(inPtr, in);
long[] results = function.apply(inPtr, inSize);
long ptrAndSize = results[0];
outPtr = (int) (ptrAndSize >> 32);
outSize = (int) ptrAndSize;
// assume the max output is 31 bit, leverage the first bit for
// error detection
if (isError(outSize)) {
int errSize = errSize(outSize);
String errData = instance.memory().readString(outPtr, errSize);
throw new RuntimeException(errData);
}
return instance.memory().readBytes(outPtr, outSize);
} finally {
if (inPtr != -1) {
dealloc.apply(inPtr, inSize);
}
if (outPtr != -1) {
dealloc.apply(outPtr, outSize);
}
}
} finally {
lock.unlock();
}
}
@Override
public void close() throws Exception {
}
private static boolean isError(int number) {
return (number & (1 << 31)) != 0;
}
private static int errSize(int number) {
return number & (~(1 << 31));
}
}
|
WasmFunction
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/task/SyncTaskExecutorTests.java
|
{
"start": 3386,
"end": 4062
}
|
class ____ {
final AtomicInteger current = new AtomicInteger();
final AtomicInteger counter = new AtomicInteger();
public void concurrentOperation() {
if (current.incrementAndGet() > 2) {
throw new IllegalStateException();
}
try {
Thread.sleep(10);
}
catch (InterruptedException ex) {
throw new IllegalStateException(ex);
}
current.decrementAndGet();
counter.incrementAndGet();
}
public String concurrentOperationWithResult() {
concurrentOperation();
return "result";
}
public String concurrentOperationWithException() throws IOException {
concurrentOperation();
throw new IOException();
}
}
}
|
ConcurrentClass
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/annotation/DeserializeUsingTest.java
|
{
"start": 1270,
"end": 1633
}
|
class ____ {
@JSONField(deserializeUsing = SubjectListDeserializer.class)
private List<Integer> subjectList;
public List<Integer> getSubjectList() {
return subjectList;
}
public void setSubjectList(List<Integer> subjectList) {
this.subjectList = subjectList;
}
}
public static
|
Teacher
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/annotation/AnnotationTypeMappingsTests.java
|
{
"start": 30273,
"end": 30676
}
|
interface ____ {
@AliasFor(annotation = MultipleRoutesToAliasC.class, attribute = "c2")
String b1() default "";
@AliasFor(annotation = MultipleRoutesToAliasC.class, attribute = "c2")
String b2() default "";
@AliasFor(annotation = MultipleRoutesToAliasC.class, attribute = "c1")
String b3() default "";
}
@Retention(RetentionPolicy.RUNTIME)
@MultipleRoutesToAliasB
@
|
MultipleRoutesToAliasB
|
java
|
alibaba__nacos
|
common/src/main/java/com/alibaba/nacos/common/trace/event/naming/RegisterInstanceTraceEvent.java
|
{
"start": 747,
"end": 1869
}
|
class ____ extends NamingTraceEvent {
private static final long serialVersionUID = -8283438151444483864L;
private final String clientIp;
private final boolean rpc;
private final String instanceIp;
private final int instancePort;
public String getClientIp() {
return clientIp;
}
public boolean isRpc() {
return rpc;
}
public String getInstanceIp() {
return instanceIp;
}
public int getInstancePort() {
return instancePort;
}
public String toInetAddr() {
return instanceIp + ":" + instancePort;
}
public RegisterInstanceTraceEvent(long eventTime, String clientIp, boolean rpc, String serviceNamespace,
String serviceGroup, String serviceName, String instanceIp, int instancePort) {
super("REGISTER_INSTANCE_TRACE_EVENT", eventTime, serviceNamespace, serviceGroup, serviceName);
this.clientIp = clientIp;
this.rpc = rpc;
this.instanceIp = instanceIp;
this.instancePort = instancePort;
}
}
|
RegisterInstanceTraceEvent
|
java
|
spring-projects__spring-boot
|
module/spring-boot-data-jpa/src/main/java/org/springframework/boot/data/jpa/autoconfigure/EnversRevisionRepositoriesRegistrar.java
|
{
"start": 1071,
"end": 1378
}
|
class ____ extends DataJpaRepositoriesRegistrar {
@Override
protected Class<?> getConfiguration() {
return EnableJpaRepositoriesConfiguration.class;
}
@EnableJpaRepositories(repositoryFactoryBeanClass = EnversRevisionRepositoryFactoryBean.class)
private static final
|
EnversRevisionRepositoriesRegistrar
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/jobmanager/scheduler/Locality.java
|
{
"start": 870,
"end": 1347
}
|
enum ____ {
/** No constraint existed on the task placement. */
UNCONSTRAINED,
/** The task was scheduled into the same TaskManager as requested */
LOCAL,
/** The task was scheduled onto the same host as requested */
HOST_LOCAL,
/** The task was scheduled to a destination not included in its locality preferences. */
NON_LOCAL,
/** No locality information was provided, it is unknown if the locality was respected */
UNKNOWN
}
|
Locality
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/aot/hint/annotation/RegisterReflectionReflectiveProcessorTests.java
|
{
"start": 5021,
"end": 5197
}
|
class ____ {
@RegisterReflection(memberCategories = MemberCategory.INVOKE_DECLARED_CONSTRUCTORS)
private void doReflection() {
}
}
static
|
RegistrationMethodWithoutTarget
|
java
|
grpc__grpc-java
|
services/src/test/java/io/grpc/protobuf/services/ChannelzServiceTest.java
|
{
"start": 1884,
"end": 10562
}
|
class ____ {
// small value to force pagination
private static final int MAX_PAGE_SIZE = 1;
private final InternalChannelz channelz = new InternalChannelz();
private ChannelzService service = new ChannelzService(channelz, MAX_PAGE_SIZE);
@Test
public void getTopChannels_empty() {
assertEquals(
GetTopChannelsResponse.newBuilder().setEnd(true).build(),
getTopChannelHelper(0));
}
@Test
public void getTopChannels_onePage() throws Exception {
TestChannel root = new TestChannel();
channelz.addRootChannel(root);
assertEquals(
GetTopChannelsResponse
.newBuilder()
.addChannel(ChannelzProtoUtil.toChannel(root))
.setEnd(true)
.build(),
getTopChannelHelper(0));
}
@Test
public void getChannel() throws ExecutionException, InterruptedException {
TestChannel root = new TestChannel();
assertChannelNotFound(root.getLogId().getId());
channelz.addRootChannel(root);
assertEquals(
GetChannelResponse
.newBuilder()
.setChannel(ChannelzProtoUtil.toChannel(root))
.build(),
getChannelHelper(root.getLogId().getId()));
channelz.removeRootChannel(root);
assertChannelNotFound(root.getLogId().getId());
}
@Test
public void getSubchannel() throws Exception {
TestChannel subchannel = new TestChannel();
assertSubchannelNotFound(subchannel.getLogId().getId());
channelz.addSubchannel(subchannel);
assertEquals(
GetSubchannelResponse
.newBuilder()
.setSubchannel(ChannelzProtoUtil.toSubchannel(subchannel))
.build(),
getSubchannelHelper(subchannel.getLogId().getId()));
channelz.removeSubchannel(subchannel);
assertSubchannelNotFound(subchannel.getLogId().getId());
}
@Test
public void getServers_empty() {
assertEquals(
GetServersResponse.newBuilder().setEnd(true).build(),
getServersHelper(0));
}
@Test
public void getServers_onePage() throws Exception {
TestServer server = new TestServer();
channelz.addServer(server);
assertEquals(
GetServersResponse
.newBuilder()
.addServer(ChannelzProtoUtil.toServer(server))
.setEnd(true)
.build(),
getServersHelper(0));
}
@Test
public void getServer() throws ExecutionException, InterruptedException {
TestServer server = new TestServer();
assertServerNotFound(server.getLogId().getId());
channelz.addServer(server);
assertEquals(
GetServerResponse
.newBuilder()
.setServer(ChannelzProtoUtil.toServer(server))
.build(),
getServerHelper(server.getLogId().getId()));
channelz.removeServer(server);
assertServerNotFound(server.getLogId().getId());
}
@Test
public void getSocket() throws Exception {
TestSocket socket = new TestSocket();
assertSocketNotFound(socket.getLogId().getId());
channelz.addClientSocket(socket);
assertEquals(
GetSocketResponse
.newBuilder()
.setSocket(ChannelzProtoUtil.toSocket(socket))
.build(),
getSocketHelper(socket.getLogId().getId()));
channelz.removeClientSocket(socket);
assertSocketNotFound(socket.getLogId().getId());
}
private GetTopChannelsResponse getTopChannelHelper(long startId) {
@SuppressWarnings("unchecked")
StreamObserver<GetTopChannelsResponse> observer = mock(StreamObserver.class);
ArgumentCaptor<GetTopChannelsResponse> responseCaptor
= ArgumentCaptor.forClass(GetTopChannelsResponse.class);
service.getTopChannels(
GetTopChannelsRequest.newBuilder().setStartChannelId(startId).build(),
observer);
verify(observer).onNext(responseCaptor.capture());
verify(observer).onCompleted();
return responseCaptor.getValue();
}
private GetChannelResponse getChannelHelper(long id) {
@SuppressWarnings("unchecked")
StreamObserver<GetChannelResponse> observer = mock(StreamObserver.class);
ArgumentCaptor<GetChannelResponse> response
= ArgumentCaptor.forClass(GetChannelResponse.class);
service.getChannel(GetChannelRequest.newBuilder().setChannelId(id).build(), observer);
verify(observer).onNext(response.capture());
verify(observer).onCompleted();
return response.getValue();
}
private void assertChannelNotFound(long id) {
@SuppressWarnings("unchecked")
StreamObserver<GetChannelResponse> observer = mock(StreamObserver.class);
ArgumentCaptor<Exception> exceptionCaptor = ArgumentCaptor.forClass(Exception.class);
service.getChannel(GetChannelRequest.newBuilder().setChannelId(id).build(), observer);
verify(observer).onError(exceptionCaptor.capture());
Status s = Status.fromThrowable(exceptionCaptor.getValue());
assertWithMessage(s.toString()).that(s.getCode()).isEqualTo(Status.Code.NOT_FOUND);
}
private GetSubchannelResponse getSubchannelHelper(long id) {
@SuppressWarnings("unchecked")
StreamObserver<GetSubchannelResponse> observer = mock(StreamObserver.class);
ArgumentCaptor<GetSubchannelResponse> response
= ArgumentCaptor.forClass(GetSubchannelResponse.class);
service.getSubchannel(GetSubchannelRequest.newBuilder().setSubchannelId(id).build(), observer);
verify(observer).onNext(response.capture());
verify(observer).onCompleted();
return response.getValue();
}
private void assertSubchannelNotFound(long id) {
@SuppressWarnings("unchecked")
StreamObserver<GetSubchannelResponse> observer = mock(StreamObserver.class);
ArgumentCaptor<Exception> exceptionCaptor = ArgumentCaptor.forClass(Exception.class);
service.getSubchannel(GetSubchannelRequest.newBuilder().setSubchannelId(id).build(), observer);
verify(observer).onError(exceptionCaptor.capture());
Status s = Status.fromThrowable(exceptionCaptor.getValue());
assertWithMessage(s.toString()).that(s.getCode()).isEqualTo(Status.Code.NOT_FOUND);
}
private GetServersResponse getServersHelper(long startId) {
@SuppressWarnings("unchecked")
StreamObserver<GetServersResponse> observer = mock(StreamObserver.class);
ArgumentCaptor<GetServersResponse> responseCaptor
= ArgumentCaptor.forClass(GetServersResponse.class);
service.getServers(
GetServersRequest.newBuilder().setStartServerId(startId).build(),
observer);
verify(observer).onNext(responseCaptor.capture());
verify(observer).onCompleted();
return responseCaptor.getValue();
}
private void assertServerNotFound(long id) {
@SuppressWarnings("unchecked")
StreamObserver<GetServerResponse> observer = mock(StreamObserver.class);
ArgumentCaptor<Exception> exceptionCaptor = ArgumentCaptor.forClass(Exception.class);
service.getServer(GetServerRequest.newBuilder().setServerId(id).build(), observer);
verify(observer).onError(exceptionCaptor.capture());
Status s = Status.fromThrowable(exceptionCaptor.getValue());
assertWithMessage(s.toString()).that(s.getCode()).isEqualTo(Status.Code.NOT_FOUND);
}
private GetServerResponse getServerHelper(long id) {
@SuppressWarnings("unchecked")
StreamObserver<GetServerResponse> observer = mock(StreamObserver.class);
ArgumentCaptor<GetServerResponse> response = ArgumentCaptor.forClass(GetServerResponse.class);
service.getServer(GetServerRequest.newBuilder().setServerId(id).build(), observer);
verify(observer).onNext(response.capture());
verify(observer).onCompleted();
return response.getValue();
}
private void assertSocketNotFound(long id) {
@SuppressWarnings("unchecked")
StreamObserver<GetSocketResponse> observer = mock(StreamObserver.class);
ArgumentCaptor<Exception> exceptionCaptor = ArgumentCaptor.forClass(Exception.class);
service.getSocket(GetSocketRequest.newBuilder().setSocketId(id).build(), observer);
verify(observer).onError(exceptionCaptor.capture());
Status s = Status.fromThrowable(exceptionCaptor.getValue());
assertWithMessage(s.toString()).that(s.getCode()).isEqualTo(Status.Code.NOT_FOUND);
}
private GetSocketResponse getSocketHelper(long id) {
@SuppressWarnings("unchecked")
StreamObserver<GetSocketResponse> observer = mock(StreamObserver.class);
ArgumentCaptor<GetSocketResponse> response
= ArgumentCaptor.forClass(GetSocketResponse.class);
service.getSocket(GetSocketRequest.newBuilder().setSocketId(id).build(), observer);
verify(observer).onNext(response.capture());
verify(observer).onCompleted();
return response.getValue();
}
}
|
ChannelzServiceTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/usage/SearchUsage.java
|
{
"start": 3224,
"end": 4689
}
|
class ____ {
/**
* A map of categories to extended data. Categories correspond to a high-level search usage statistic,
* e.g. `queries`, `rescorers`, `sections`, `retrievers`.
*
* Extended data is further segmented by name, for example collecting specific statistics for certain retrievers only.
* Finally we keep track of the set of values we are tracking for each category and name.
*/
private final Map<String, Map<String, Set<String>>> categoriesToExtendedUsage = new HashMap<>();
public void initialize(String category, String name) {
categoriesToExtendedUsage.computeIfAbsent(category, k -> new HashMap<>()).computeIfAbsent(name, k -> new HashSet<>());
}
public void track(String category, String name, String value) {
categoriesToExtendedUsage.computeIfAbsent(category, k -> new HashMap<>())
.computeIfAbsent(name, k -> new HashSet<>())
.add(value);
}
public void track(String category, String name, Set<String> values) {
categoriesToExtendedUsage.computeIfAbsent(category, k -> new HashMap<>())
.computeIfAbsent(name, k -> new HashSet<>())
.addAll(values);
}
public Map<String, Map<String, Set<String>>> getUsage() {
return Collections.unmodifiableMap(categoriesToExtendedUsage);
}
}
}
|
ExtendedUsageTracker
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/search/arguments/SugAddArgs.java
|
{
"start": 612,
"end": 775
}
|
class ____<K, V> {
private boolean incr;
private V payload;
/**
* Builder entry points for {@link SugAddArgs}.
*/
public static
|
SugAddArgs
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBSegmentedBytesStore.java
|
{
"start": 857,
"end": 1353
}
|
class ____ extends AbstractRocksDBSegmentedBytesStore<KeyValueSegment> {
RocksDBSegmentedBytesStore(final String name,
final String metricsScope,
final long retention,
final long segmentInterval,
final KeySchema keySchema) {
super(name, retention, keySchema, new KeyValueSegments(name, metricsScope, retention, segmentInterval));
}
}
|
RocksDBSegmentedBytesStore
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/test/java/org/springframework/web/reactive/result/method/HandlerMethodMappingTests.java
|
{
"start": 7077,
"end": 8873
}
|
class ____ extends AbstractHandlerMethodMapping<String> {
private PathPatternParser parser = new PathPatternParser();
private final List<String> matches = new ArrayList<>();
public List<String> getMatches() {
return this.matches;
}
@Override
protected boolean isHandler(Class<?> beanType) {
return true;
}
@Override
protected String getMappingForMethod(Method method, Class<?> handlerType) {
String methodName = method.getName();
return methodName.startsWith("handler") ? methodName : null;
}
@Override
protected Set<String> getDirectPaths(String mapping) {
return (parser.parse(mapping).hasPatternSyntax() ?
Collections.emptySet() : Collections.singleton(mapping));
}
@Override
protected CorsConfiguration initCorsConfiguration(Object handler, Method method, String mapping) {
CrossOrigin crossOrigin = AnnotatedElementUtils.findMergedAnnotation(method, CrossOrigin.class);
if (crossOrigin != null) {
CorsConfiguration corsConfig = new CorsConfiguration();
corsConfig.setAllowedOrigins(Collections.singletonList("https://domain.com"));
return corsConfig;
}
return null;
}
@Override
protected String getMatchingMapping(String pattern, ServerWebExchange exchange) {
PathContainer lookupPath = exchange.getRequest().getPath().pathWithinApplication();
PathPattern parsedPattern = this.parser.parse(pattern);
String match = parsedPattern.matches(lookupPath) ? pattern : null;
if (match != null) {
matches.add(match);
}
return match;
}
@Override
protected Comparator<String> getMappingComparator(ServerWebExchange exchange) {
return (o1, o2) -> PathPattern.SPECIFICITY_COMPARATOR.compare(parser.parse(o1), parser.parse(o2));
}
}
@Controller
private static
|
MyHandlerMethodMapping
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/observers/ParameterizedPayloadTest.java
|
{
"start": 1721,
"end": 2450
}
|
class ____ {
private AtomicReference<List<? extends Number>> intList;
private AtomicReference<List<String>> strList;
@PostConstruct
void init() {
intList = new AtomicReference<>();
strList = new AtomicReference<>();
}
<T extends List<? extends Number>> void observeIntList(@Observes T value) {
intList.set(value);
}
List<? extends Number> getIntList() {
return intList.get();
}
void observeStrList(@Observes List<String> value) {
strList.set(value);
}
List<String> getStrList() {
return strList.get();
}
}
@Dependent
static
|
ListObserver
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/bug/Bug_for_lenolix_10.java
|
{
"start": 1176,
"end": 1740
}
|
class ____ {
private int id;
private Boolean isBoy;
private String name;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public Boolean getIsBoy() {
return isBoy;
}
public void setIsBoy(Boolean isBoy) {
this.isBoy = isBoy;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
}
|
User
|
java
|
mockito__mockito
|
mockito-integration-tests/junit-jupiter-inline-mock-maker-extension-tests/src/test/java/org/mockitousage/NoExtendsTest.java
|
{
"start": 330,
"end": 661
}
|
class ____ {
@Mock private MockedStatic<Dummy> staticMethod;
@Mock private MockedConstruction<Dummy> construction;
@Test
void runsStaticMethods() {
assertThat(Dummy.foo()).isNull();
}
@Test
void runsConstruction() {
assertThat(new Dummy().bar()).isNull();
}
static
|
NoExtendsTest
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/serde/ChangelogModeJsonSerdeTest.java
|
{
"start": 1418,
"end": 2085
}
|
class ____ {
@ParameterizedTest
@ValueSource(booleans = {true, false})
void testChangelogModeSerde(boolean keyOnlyDeletes) throws IOException {
ChangelogMode changelogMode =
ChangelogMode.newBuilder()
.addContainedKind(RowKind.INSERT)
.addContainedKind(RowKind.DELETE)
.addContainedKind(RowKind.UPDATE_AFTER)
.addContainedKind(RowKind.UPDATE_BEFORE)
.keyOnlyDeletes(keyOnlyDeletes)
.build();
testJsonRoundTrip(changelogMode, ChangelogMode.class);
}
}
|
ChangelogModeJsonSerdeTest
|
java
|
quarkusio__quarkus
|
integration-tests/infinispan-cache/src/main/java/io/quarkus/it/cache/infinispan/SunriseRestClient.java
|
{
"start": 519,
"end": 1548
}
|
interface ____ {
String CACHE_NAME = "sunrise-cache";
@GET
@Path("time/{city}")
@CacheResult(cacheName = CACHE_NAME)
String getSunriseTime(@RestPath String city, @RestQuery String date);
@GET
@Path("time/{city}")
@CacheResult(cacheName = CACHE_NAME)
Uni<String> getAsyncSunriseTime(@RestPath String city, @RestQuery String date);
@GET
@Path("invocations")
Integer getSunriseTimeInvocations();
/*
* The following methods wouldn't make sense in a real-life application but it's not relevant here. We only need to check if
* the caching annotations work as intended with the rest-client extension.
*/
@DELETE
@Path("invalidate/{city}")
@CacheInvalidate(cacheName = CACHE_NAME)
Uni<Void> invalidate(@CacheKey @RestPath String city, @RestQuery String notPartOfTheCacheKey,
@CacheKey @RestPath String date);
@DELETE
@Path("invalidate")
@CacheInvalidateAll(cacheName = CACHE_NAME)
void invalidateAll();
}
|
SunriseRestClient
|
java
|
google__dagger
|
javatests/artifacts/hilt-android/viewmodel/app/src/main/java/dagger/hilt/viewmodel/SimpleActivity.java
|
{
"start": 1157,
"end": 1786
}
|
class ____ extends AppCompatActivity {
private static final String TAG = SimpleActivity.class.getSimpleName();
@Inject
@ActivityRetainedSavedState
SavedStateHandle savedStateHandle;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
SimpleViewModel viewModel = new ViewModelProvider(this).get(SimpleViewModel.class);
savedStateHandle.set("some_key", "some_content");
setContentView(R.layout.activity_main);
((TextView) findViewById(R.id.greeting))
.setText(getResources().getString(R.string.welcome, viewModel.userName));
}
}
|
SimpleActivity
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockito/internal/util/reflection/DummyParentClassForTests.java
|
{
"start": 174,
"end": 341
}
|
class ____ {
@SuppressWarnings("unused") // I know, I know. We're doing nasty reflection hacks here...
private String somePrivateField;
}
|
DummyParentClassForTests
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/primitives/FloatArrayAsListTest.java
|
{
"start": 1637,
"end": 3192
}
|
class ____ extends TestCase {
private static List<Float> asList(Float[] values) {
float[] temp = new float[values.length];
for (int i = 0; i < values.length; i++) {
temp[i] = checkNotNull(values[i]); // checkNotNull for GWT (do not optimize).
}
return Floats.asList(temp);
}
@J2ktIncompatible
@GwtIncompatible // suite
public static Test suite() {
List<ListTestSuiteBuilder<Float>> builders =
ImmutableList.of(
ListTestSuiteBuilder.using(new FloatsAsListGenerator()).named("Floats.asList"),
ListTestSuiteBuilder.using(new FloatsAsListHeadSubListGenerator())
.named("Floats.asList, head subList"),
ListTestSuiteBuilder.using(new FloatsAsListTailSubListGenerator())
.named("Floats.asList, tail subList"),
ListTestSuiteBuilder.using(new FloatsAsListMiddleSubListGenerator())
.named("Floats.asList, middle subList"));
TestSuite suite = new TestSuite();
for (ListTestSuiteBuilder<Float> builder : builders) {
suite.addTest(
builder
.withFeatures(
CollectionSize.ONE,
CollectionSize.SEVERAL,
CollectionFeature.RESTRICTS_ELEMENTS,
ListFeature.SUPPORTS_SET)
.createTestSuite());
}
return suite;
}
// Test generators. To let the GWT test suite generator access them, they need to be
// public named classes with a public default constructor.
public static final
|
FloatArrayAsListTest
|
java
|
apache__flink
|
flink-streaming-java/src/test/java/org/apache/flink/streaming/api/functions/async/RichAsyncFunctionTest.java
|
{
"start": 2011,
"end": 17654
}
|
class ____ {
/**
* Test the set of iteration runtime context methods in the context of a {@link
* RichAsyncFunction}.
*/
@Test
void testIterationRuntimeContext() {
RichAsyncFunction<Integer, Integer> function =
new RichAsyncFunction<Integer, Integer>() {
private static final long serialVersionUID = -2023923961609455894L;
@Override
public void asyncInvoke(Integer input, ResultFuture<Integer> resultFuture)
throws Exception {
// no op
}
};
int superstepNumber = 42;
IterationRuntimeContext mockedIterationRuntimeContext = mock(IterationRuntimeContext.class);
when(mockedIterationRuntimeContext.getSuperstepNumber()).thenReturn(superstepNumber);
function.setRuntimeContext(mockedIterationRuntimeContext);
IterationRuntimeContext iterationRuntimeContext = function.getIterationRuntimeContext();
assertThat(iterationRuntimeContext.getSuperstepNumber()).isEqualTo(superstepNumber);
assertThatThrownBy(() -> iterationRuntimeContext.getIterationAggregator("foobar"))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(() -> iterationRuntimeContext.getPreviousIterationAggregate("foobar"))
.isInstanceOf(UnsupportedOperationException.class);
}
/** Test the set of runtime context methods in the context of a {@link RichAsyncFunction}. */
@Test
void testRuntimeContext() {
RichAsyncFunction<Integer, Integer> function =
new RichAsyncFunction<Integer, Integer>() {
private static final long serialVersionUID = 1707630162838967972L;
@Override
public void asyncInvoke(Integer input, ResultFuture<Integer> resultFuture)
throws Exception {
// no op
}
};
final String taskName = "foobarTask";
final OperatorMetricGroup metricGroup =
UnregisteredMetricsGroup.createOperatorMetricGroup();
final int numberOfParallelSubtasks = 43;
final int indexOfSubtask = 42;
final int attemptNumber = 1337;
final String taskNameWithSubtask = "foobarTask (43/43)#1337";
final Map<String, String> globalJobParameters = new HashMap<>();
globalJobParameters.put("k1", "v1");
final ClassLoader userCodeClassLoader = mock(ClassLoader.class);
final boolean isObjectReused = true;
RuntimeContext mockedRuntimeContext = mock(RuntimeContext.class);
TaskInfo mockedTaskInfo = mock(TaskInfo.class);
when(mockedTaskInfo.getTaskName()).thenReturn(taskName);
when(mockedTaskInfo.getNumberOfParallelSubtasks()).thenReturn(numberOfParallelSubtasks);
when(mockedTaskInfo.getIndexOfThisSubtask()).thenReturn(indexOfSubtask);
when(mockedTaskInfo.getAttemptNumber()).thenReturn(attemptNumber);
when(mockedTaskInfo.getTaskNameWithSubtasks()).thenReturn(taskNameWithSubtask);
when(mockedRuntimeContext.getTaskInfo()).thenReturn(mockedTaskInfo);
when(mockedRuntimeContext.getMetricGroup()).thenReturn(metricGroup);
when(mockedRuntimeContext.getGlobalJobParameters()).thenReturn(globalJobParameters);
when(mockedRuntimeContext.isObjectReuseEnabled()).thenReturn(isObjectReused);
when(mockedRuntimeContext.getUserCodeClassLoader()).thenReturn(userCodeClassLoader);
function.setRuntimeContext(mockedRuntimeContext);
RuntimeContext runtimeContext = function.getRuntimeContext();
assertThat(runtimeContext.getTaskInfo().getTaskName()).isEqualTo(taskName);
assertThat(runtimeContext.getMetricGroup()).isEqualTo(metricGroup);
assertThat(runtimeContext.getTaskInfo().getNumberOfParallelSubtasks())
.isEqualTo(numberOfParallelSubtasks);
assertThat(runtimeContext.getTaskInfo().getIndexOfThisSubtask()).isEqualTo(indexOfSubtask);
assertThat(runtimeContext.getTaskInfo().getAttemptNumber()).isEqualTo(attemptNumber);
assertThat(runtimeContext.getTaskInfo().getTaskNameWithSubtasks())
.isEqualTo(taskNameWithSubtask);
assertThat(runtimeContext.getGlobalJobParameters()).isEqualTo(globalJobParameters);
assertThat(runtimeContext.isObjectReuseEnabled()).isEqualTo(isObjectReused);
assertThat(runtimeContext.getUserCodeClassLoader()).isEqualTo(userCodeClassLoader);
assertThatThrownBy(runtimeContext::getDistributedCache)
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
runtimeContext.getState(
new ValueStateDescriptor<>("foobar", Integer.class, 42)))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
runtimeContext.getListState(
new ListStateDescriptor<>("foobar", Integer.class)))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
runtimeContext.getReducingState(
new ReducingStateDescriptor<>(
"foobar",
new ReduceFunction<Integer>() {
private static final long serialVersionUID =
2136425961884441050L;
@Override
public Integer reduce(
Integer value1, Integer value2) {
return value1;
}
},
Integer.class)))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
runtimeContext.getAggregatingState(
new AggregatingStateDescriptor<>(
"foobar",
new AggregateFunction<Integer, Integer, Integer>() {
@Override
public Integer createAccumulator() {
return null;
}
@Override
public Integer add(
Integer value, Integer accumulator) {
return null;
}
@Override
public Integer getResult(Integer accumulator) {
return null;
}
@Override
public Integer merge(Integer a, Integer b) {
return null;
}
},
Integer.class)))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
runtimeContext.getMapState(
new MapStateDescriptor<>(
"foobar", Integer.class, String.class)))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
runtimeContext.getState(
new org.apache.flink.api.common.state.v2
.ValueStateDescriptor<>("foobar", Integer.class)))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
runtimeContext.getListState(
new org.apache.flink.api.common.state.v2
.ListStateDescriptor<>("foobar", Integer.class)))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
runtimeContext.getReducingState(
new org.apache.flink.api.common.state.v2
.ReducingStateDescriptor<>(
"foobar",
new ReduceFunction<Integer>() {
private static final long serialVersionUID =
2136425961884441050L;
@Override
public Integer reduce(
Integer value1, Integer value2) {
return value1;
}
},
Integer.class)))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
runtimeContext.getAggregatingState(
new org.apache.flink.api.common.state.v2
.AggregatingStateDescriptor<>(
"foobar",
new AggregateFunction<Integer, Integer, Integer>() {
@Override
public Integer createAccumulator() {
return null;
}
@Override
public Integer add(
Integer value, Integer accumulator) {
return null;
}
@Override
public Integer getResult(Integer accumulator) {
return null;
}
@Override
public Integer merge(Integer a, Integer b) {
return null;
}
},
Integer.class)))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
runtimeContext.getMapState(
new org.apache.flink.api.common.state.v2
.MapStateDescriptor<>(
"foobar", Integer.class, String.class)))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
runtimeContext.addAccumulator(
"foobar",
new Accumulator<Integer, Integer>() {
private static final long serialVersionUID =
-4673320336846482358L;
@Override
public void add(Integer value) {
// no op
}
@Override
public Integer getLocalValue() {
return null;
}
@Override
public void resetLocal() {}
@Override
public void merge(
Accumulator<Integer, Integer> other) {}
@Override
public Accumulator<Integer, Integer> clone() {
return null;
}
}))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(() -> runtimeContext.getAccumulator("foobar"))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(() -> runtimeContext.getIntCounter("foobar"))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(() -> runtimeContext.getLongCounter("foobar"))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(() -> runtimeContext.getDoubleCounter("foobar"))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(() -> runtimeContext.getHistogram("foobar"))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(() -> runtimeContext.getBroadcastVariable("foobar"))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(() -> runtimeContext.hasBroadcastVariable("foobar"))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(() -> runtimeContext.getBroadcastVariable("foobar"))
.isInstanceOf(UnsupportedOperationException.class);
assertThatThrownBy(
() ->
runtimeContext.getBroadcastVariableWithInitializer(
"foobar", data -> null))
.isInstanceOf(UnsupportedOperationException.class);
}
}
|
RichAsyncFunctionTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/identifier/uuid/random2/Book.java
|
{
"start": 548,
"end": 1074
}
|
class ____ {
@Id
@GeneratedValue
@UuidGenerator(style = RANDOM)
private UUID id;
@Basic
private String name;
//end::example-identifiers-generators-uuid-implicit[]
protected Book() {
// for Hibernate use
}
public Book(String name) {
this.name = name;
}
public UUID getId() {
return id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
//tag::example-identifiers-generators-uuid-implicit[]
}
//end::example-identifiers-generators-uuid-implicit[]
|
Book
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/util/ClassNameComparator.java
|
{
"start": 686,
"end": 960
}
|
class ____ implements Comparator<Class<?>> {
public static final ClassNameComparator INSTANCE = new ClassNameComparator();
@Override
public int compare(Class<?> class1, Class<?> class2) {
return class1.getName().compareTo(class2.getName());
}
}
|
ClassNameComparator
|
java
|
quarkusio__quarkus
|
integration-tests/vertx/src/main/java/io/quarkus/it/vertx/EventBusConsumer.java
|
{
"start": 180,
"end": 682
}
|
class ____ {
@ConsumeEvent("pets")
public String sayHi(Pet pet) {
return "Hello " + pet.getName() + " (" + pet.getKind() + ")";
}
@ConsumeEvent("persons")
public String name(String name) {
return "Hello " + name;
}
@ConsumeEvent("person-headers")
public String personWithHeader(MultiMap headers, Person person) {
String s = "Hello " + person.getFirstName() + " " + person.getLastName() + ", " + headers;
return s;
}
}
|
EventBusConsumer
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/event/test/self_inject/MyEventPublisher.java
|
{
"start": 880,
"end": 1081
}
|
class ____ {
@Autowired
private ApplicationEventPublisher eventPublisher;
public void publishMyEvent(String message) {
eventPublisher.publishEvent(new MyEvent(this, message));
}
}
|
MyEventPublisher
|
java
|
spring-projects__spring-framework
|
spring-tx/src/test/java/org/springframework/transaction/annotation/AnnotationTransactionAttributeSourceTests.java
|
{
"start": 17392,
"end": 17780
}
|
class ____ implements ITestEjb {
private String name;
private int age;
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
@Override
public int getAge() {
return age;
}
@Override
public void setAge(int age) {
this.age = age;
}
}
}
@Nested
|
Ejb3AnnotatedBean3
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_1200/Issue1272_IgnoreError.java
|
{
"start": 263,
"end": 512
}
|
class ____ extends TestCase {
public void test_for_issue() throws Exception {
String text = JSON.toJSONString(new Point(), SerializerFeature.IgnoreErrorGetter);
assertEquals("{}", text);
}
public static
|
Issue1272_IgnoreError
|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/util/CompressedSerializedValueTest.java
|
{
"start": 1134,
"end": 2802
}
|
class ____ {
@Test
void testSimpleValue() throws Exception {
final String value = "teststring";
CompressedSerializedValue<String> v = CompressedSerializedValue.fromObject(value);
CompressedSerializedValue<String> copy = CommonTestUtils.createCopySerializable(v);
assertThat(v.deserializeValue(getClass().getClassLoader())).isEqualTo(value);
assertThat(copy.deserializeValue(getClass().getClassLoader())).isEqualTo(value);
assertThat(copy).isEqualTo(v);
assertThat(copy).hasSameHashCodeAs(v.hashCode());
assertThat(v.toString()).isNotNull();
assertThat(copy.toString()).isNotNull();
assertThat(v.getSize()).isNotEqualTo(0);
assertThat(copy.getByteArray()).isEqualTo(v.getByteArray());
byte[] bytes = v.getByteArray();
CompressedSerializedValue<String> saved =
CompressedSerializedValue.fromBytes(Arrays.copyOf(bytes, bytes.length));
assertThat(saved).isEqualTo(v);
assertThat(saved.getByteArray()).isEqualTo(v.getByteArray());
}
@Test
void testNullValue() {
assertThatThrownBy(() -> CompressedSerializedValue.fromObject(null))
.isInstanceOf(NullPointerException.class);
}
@Test
void testFromNullBytes() {
assertThatThrownBy(() -> CompressedSerializedValue.fromBytes(null))
.isInstanceOf(NullPointerException.class);
}
@Test
void testFromEmptyBytes() {
assertThatThrownBy(() -> CompressedSerializedValue.fromBytes(new byte[0]))
.isInstanceOf(IllegalArgumentException.class);
}
}
|
CompressedSerializedValueTest
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/customized/QuarkusJtaPlatform.java
|
{
"start": 673,
"end": 3431
}
|
class ____ implements JtaPlatform, TransactionManagerAccess {
public static final QuarkusJtaPlatform INSTANCE = new QuarkusJtaPlatform();
private volatile TransactionSynchronizationRegistry transactionSynchronizationRegistry;
private volatile TransactionManager transactionManager;
private volatile UserTransaction userTransaction;
private QuarkusJtaPlatform() {
//nothing
}
public TransactionSynchronizationRegistry retrieveTransactionSynchronizationRegistry() {
TransactionSynchronizationRegistry transactionSynchronizationRegistry = this.transactionSynchronizationRegistry;
if (transactionSynchronizationRegistry == null) {
transactionSynchronizationRegistry = Arc.container().instance(TransactionSynchronizationRegistry.class).get();
this.transactionSynchronizationRegistry = transactionSynchronizationRegistry;
}
return transactionSynchronizationRegistry;
}
@Override
public TransactionManager retrieveTransactionManager() {
TransactionManager transactionManager = this.transactionManager;
if (transactionManager == null) {
transactionManager = com.arjuna.ats.jta.TransactionManager.transactionManager();
this.transactionManager = transactionManager;
}
return transactionManager;
}
@Override
public TransactionManager getTransactionManager() {
return retrieveTransactionManager();
}
@Override
public UserTransaction retrieveUserTransaction() {
UserTransaction userTransaction = this.userTransaction;
if (userTransaction == null) {
userTransaction = com.arjuna.ats.jta.UserTransaction.userTransaction();
this.userTransaction = userTransaction;
}
return userTransaction;
}
@Override
public Object getTransactionIdentifier(final Transaction transaction) {
return transaction;
}
@Override
public void registerSynchronization(Synchronization synchronization) {
try {
getTransactionManager().getTransaction().registerSynchronization(synchronization);
} catch (Exception e) {
throw new JtaPlatformException("Could not access JTA Transaction to register synchronization", e);
}
}
@Override
public boolean canRegisterSynchronization() {
// no need to check STATUS_MARKED_ROLLBACK since synchronizations can't be registered in that state
return retrieveTransactionSynchronizationRegistry().getTransactionStatus() == STATUS_ACTIVE;
}
@Override
public int getCurrentStatus() throws SystemException {
return this.retrieveTransactionManager().getStatus();
}
}
|
QuarkusJtaPlatform
|
java
|
quarkusio__quarkus
|
extensions/grpc/deployment/src/test/java/io/quarkus/grpc/server/scaling/MultipleGrpcVerticlesTest.java
|
{
"start": 501,
"end": 1178
}
|
class ____ extends ScalingTestBase {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addPackage(GreeterGrpc.class.getPackage())
.addClass(ThreadReturningGreeterService.class))
.withConfigurationResource("multiple-instances-config.properties");
@Test
public void shouldUseMultipleThreads() throws InterruptedException, TimeoutException, ExecutionException {
Set<String> threads = getThreadsUsedFor100Requests();
assertThat(threads).hasSizeGreaterThan(1);
}
}
|
MultipleGrpcVerticlesTest
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/OpenshiftBuildsEndpointBuilderFactory.java
|
{
"start": 11627,
"end": 15303
}
|
interface ____
extends
EndpointProducerBuilder {
default OpenshiftBuildsEndpointBuilder basic() {
return (OpenshiftBuildsEndpointBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedOpenshiftBuildsEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedOpenshiftBuildsEndpointBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Connection timeout in milliseconds to use when making requests to the
* Kubernetes API server.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: advanced
*
* @param connectionTimeout the value to set
* @return the dsl builder
*/
default AdvancedOpenshiftBuildsEndpointBuilder connectionTimeout(Integer connectionTimeout) {
doSetProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Connection timeout in milliseconds to use when making requests to the
* Kubernetes API server.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: advanced
*
* @param connectionTimeout the value to set
* @return the dsl builder
*/
default AdvancedOpenshiftBuildsEndpointBuilder connectionTimeout(String connectionTimeout) {
doSetProperty("connectionTimeout", connectionTimeout);
return this;
}
}
public
|
AdvancedOpenshiftBuildsEndpointBuilder
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-resource-server/src/main/java/org/springframework/security/oauth2/server/resource/OAuth2ProtectedResourceMetadata.java
|
{
"start": 1558,
"end": 2416
}
|
class ____
implements OAuth2ProtectedResourceMetadataClaimAccessor, Serializable {
@Serial
private static final long serialVersionUID = -18589911827039000L;
private final Map<String, Object> claims;
private OAuth2ProtectedResourceMetadata(Map<String, Object> claims) {
Assert.notEmpty(claims, "claims cannot be empty");
this.claims = Collections.unmodifiableMap(new LinkedHashMap<>(claims));
}
/**
* Returns the metadata as claims.
* @return a {@code Map} of the metadata as claims
*/
public Map<String, Object> getClaims() {
return this.claims;
}
/**
* Constructs a new {@link Builder} with empty claims.
* @return the {@link Builder}
*/
public static Builder builder() {
return new Builder();
}
/**
* Helps configure an {@link OAuth2ProtectedResourceMetadata}.
*/
public static final
|
OAuth2ProtectedResourceMetadata
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/OpenstackNovaComponentBuilderFactory.java
|
{
"start": 1881,
"end": 4002
}
|
interface ____ extends ComponentBuilder<NovaComponent> {
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default OpenstackNovaComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default OpenstackNovaComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
}
|
OpenstackNovaComponentBuilder
|
java
|
quarkusio__quarkus
|
integration-tests/main/src/main/java/io/quarkus/it/faulttolerance/MyFaultToleranceError.java
|
{
"start": 46,
"end": 135
}
|
class ____ extends Error {
public MyFaultToleranceError() {
}
}
|
MyFaultToleranceError
|
java
|
apache__logging-log4j2
|
log4j-api/src/main/java/org/apache/logging/log4j/util/Supplier.java
|
{
"start": 917,
"end": 1304
}
|
interface ____ how to supply a value.
*
* <p>This is a <a href="https://docs.oracle.com/javase/8/docs/api/java/util/function/package-summary.html">functional
* interface</a> intended to support lambda expressions in log4j 2.
*
* <p>Implementors are free to cache values or return a new or distinct value each time the supplier is invoked.
*
* <p><strong>DEPRECATED:</strong> this
|
know
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/support/ContextLoaderUtilsContextHierarchyTests.java
|
{
"start": 19587,
"end": 19762
}
|
class ____ extends TestClass1WithSingleLevelContextHierarchy {
}
@ContextHierarchy(@ContextConfiguration("three.xml"))
private static
|
TestClass2WithSingleLevelContextHierarchy
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/iterative/concurrent/BlockingBackChannelTest.java
|
{
"start": 3130,
"end": 4710
}
|
class ____ implements Runnable {
private final BlockingBackChannel backChannel;
private final BlockingQueue<Integer> dataChannel;
private final Random random;
private final List<String> actionLog;
IterationHead(
BlockingBackChannel backChannel,
BlockingQueue<Integer> dataChannel,
List<String> actionLog) {
this.backChannel = backChannel;
this.dataChannel = dataChannel;
this.actionLog = actionLog;
random = new Random();
}
@Override
public void run() {
processInputAndSendMessageThroughDataChannel();
for (int n = 0; n < NUM_ITERATIONS; n++) {
try {
backChannel.getReadEndAfterSuperstepEnded();
actionLog.add("head reads in iteration " + n);
Thread.sleep(random.nextInt(100));
// we don't send through the data channel in the last iteration, we would send
// to the output task
if (n != NUM_ITERATIONS - 1) {
processInputAndSendMessageThroughDataChannel();
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
void processInputAndSendMessageThroughDataChannel() {
actionLog.add("head sends data");
dataChannel.offer(INPUT_COMPLETELY_PROCESSED_MESSAGE);
}
}
|
IterationHead
|
java
|
apache__kafka
|
connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java
|
{
"start": 8614,
"end": 8815
}
|
class ____ {
@GET
@Path("/registered")
public boolean isRegistered() {
return true;
}
}
}
}
|
IntegrationTestRestExtensionResource
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
|
{
"start": 2443,
"end": 5751
}
|
class ____
extends BlockAliasMap<FileRegion> implements Configurable {
private Configuration conf;
private ReaderOptions readerOpts = TextReader.defaults();
private WriterOptions writerOpts = TextWriter.defaults();
public static final Logger LOG =
LoggerFactory.getLogger(TextFileRegionAliasMap.class);
@Override
public void setConf(Configuration conf) {
readerOpts.setConf(conf);
writerOpts.setConf(conf);
this.conf = conf;
}
@Override
public Configuration getConf() {
return conf;
}
@Override
public Reader<FileRegion> getReader(Reader.Options opts, String blockPoolID)
throws IOException {
if (null == opts) {
opts = readerOpts;
}
if (!(opts instanceof ReaderOptions)) {
throw new IllegalArgumentException("Invalid options " + opts.getClass());
}
ReaderOptions o = (ReaderOptions) opts;
Configuration readerConf = (null == o.getConf())
? new Configuration()
: o.getConf();
return createReader(o.file, o.delim, readerConf, blockPoolID);
}
@VisibleForTesting
TextReader createReader(Path file, String delim, Configuration cfg,
String blockPoolID) throws IOException {
FileSystem fs = file.getFileSystem(cfg);
if (fs instanceof LocalFileSystem) {
fs = ((LocalFileSystem)fs).getRaw();
}
CompressionCodecFactory factory = new CompressionCodecFactory(cfg);
CompressionCodec codec = factory.getCodec(file);
String filename = fileNameFromBlockPoolID(blockPoolID);
if (codec != null) {
filename = filename + codec.getDefaultExtension();
}
Path bpidFilePath = new Path(file.getParent(), filename);
return new TextReader(fs, bpidFilePath, codec, delim);
}
@Override
public Writer<FileRegion> getWriter(Writer.Options opts, String blockPoolID)
throws IOException {
if (null == opts) {
opts = writerOpts;
}
if (!(opts instanceof WriterOptions)) {
throw new IllegalArgumentException("Invalid options " + opts.getClass());
}
WriterOptions o = (WriterOptions) opts;
Configuration cfg = (null == o.getConf())
? new Configuration()
: o.getConf();
String baseName = fileNameFromBlockPoolID(blockPoolID);
Path blocksFile = new Path(o.dir, baseName);
if (o.codec != null) {
CompressionCodecFactory factory = new CompressionCodecFactory(cfg);
CompressionCodec codec = factory.getCodecByName(o.codec);
blocksFile = new Path(o.dir, baseName + codec.getDefaultExtension());
return createWriter(blocksFile, codec, o.delim, cfg);
}
return createWriter(blocksFile, null, o.delim, conf);
}
@VisibleForTesting
TextWriter createWriter(Path file, CompressionCodec codec, String delim,
Configuration cfg) throws IOException {
FileSystem fs = file.getFileSystem(cfg);
if (fs instanceof LocalFileSystem) {
fs = ((LocalFileSystem)fs).getRaw();
}
OutputStream tmp = fs.create(file);
java.io.Writer out = new BufferedWriter(new OutputStreamWriter(
(null == codec) ? tmp : codec.createOutputStream(tmp), StandardCharsets.UTF_8));
return new TextWriter(out, delim);
}
/**
* Class specifying reader options for the {@link TextFileRegionAliasMap}.
*/
public static
|
TextFileRegionAliasMap
|
java
|
junit-team__junit5
|
junit-jupiter-engine/src/main/java/org/junit/jupiter/engine/descriptor/DynamicDescendantFilter.java
|
{
"start": 2392,
"end": 2914
}
|
enum ____ {
EXPLICIT, ALLOW_ALL
}
public DynamicDescendantFilter copy(UnaryOperator<UniqueId> uniqueIdTransformer) {
return configure(uniqueIdTransformer, new DynamicDescendantFilter());
}
protected DynamicDescendantFilter configure(UnaryOperator<UniqueId> uniqueIdTransformer,
DynamicDescendantFilter copy) {
this.allowedUniqueIds.stream().map(uniqueIdTransformer).forEach(copy.allowedUniqueIds::add);
copy.allowedIndices.addAll(this.allowedIndices);
copy.mode = this.mode;
return copy;
}
private
|
Mode
|
java
|
grpc__grpc-java
|
api/src/main/java/io/grpc/InternalManagedChannelBuilder.java
|
{
"start": 707,
"end": 1194
}
|
class ____ {
private InternalManagedChannelBuilder() {}
public static <T extends ManagedChannelBuilder<T>> T interceptWithTarget(
ManagedChannelBuilder<T> builder, InternalInterceptorFactory factory) {
return builder.interceptWithTarget(factory);
}
public static <T extends ManagedChannelBuilder<T>> T addMetricSink(
ManagedChannelBuilder<T> builder, MetricSink metricSink) {
return builder.addMetricSink(metricSink);
}
public
|
InternalManagedChannelBuilder
|
java
|
spring-projects__spring-framework
|
spring-jms/src/main/java/org/springframework/jms/core/JmsClient.java
|
{
"start": 9769,
"end": 10681
}
|
class ____ convert the payload to
* @return the payload of the {@link Message} received,
* or {@link Optional#empty()} if none
* @param messageSelector the JMS message selector to apply
* @see #withReceiveTimeout
*/
<T> Optional<T> receive(String messageSelector, Class<T> targetClass) throws MessagingException;
/**
* Send a request message and receive the reply from the given destination.
* @param requestMessage the spring-messaging {@link Message} to send
* @return the spring-messaging {@link Message} received as a reply,
* or {@link Optional#empty()} if none
* @see #withReceiveTimeout
*/
Optional<Message<?>> sendAndReceive(Message<?> requestMessage) throws MessagingException;
/**
* Send a request message and receive the reply from the given destination.
* @param request the payload to convert into a request {@link Message}
* @param targetClass the
|
to
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/HttpSecuritySecurityMatchersNoMvcTests.java
|
{
"start": 4611,
"end": 4976
}
|
class ____ {
@Bean
SecurityFilterChain appSecurity(HttpSecurity http) throws Exception {
// @formatter:off
http
.securityMatcher("/path")
.httpBasic(withDefaults())
.authorizeHttpRequests((authorize) -> authorize
.anyRequest().denyAll());
// @formatter:on
return http.build();
}
@RestController
static
|
SecurityMatcherNoMvcConfig
|
java
|
apache__kafka
|
raft/src/main/java/org/apache/kafka/raft/internals/ThresholdPurgatory.java
|
{
"start": 1126,
"end": 3322
}
|
class ____<T extends Comparable<T>> implements FuturePurgatory<T> {
private final AtomicLong idGenerator = new AtomicLong(0);
private final ExpirationService expirationService;
private final ConcurrentNavigableMap<ThresholdKey<T>, CompletableFuture<Long>> thresholdMap =
new ConcurrentSkipListMap<>();
public ThresholdPurgatory(ExpirationService expirationService) {
this.expirationService = expirationService;
}
@Override
public CompletableFuture<Long> await(T threshold, long maxWaitTimeMs) {
ThresholdKey<T> key = new ThresholdKey<>(idGenerator.incrementAndGet(), threshold);
CompletableFuture<Long> future = expirationService.failAfter(maxWaitTimeMs);
thresholdMap.put(key, future);
future.whenComplete((timeMs, exception) -> thresholdMap.remove(key));
return future;
}
@Override
public void maybeComplete(T value, long currentTimeMs) {
ThresholdKey<T> maxKey = new ThresholdKey<>(Long.MAX_VALUE, value);
NavigableMap<ThresholdKey<T>, CompletableFuture<Long>> submap = thresholdMap.headMap(maxKey);
for (CompletableFuture<Long> completion : submap.values()) {
completion.complete(currentTimeMs);
}
}
@Override
public void completeAll(long currentTimeMs) {
for (CompletableFuture<Long> completion : thresholdMap.values()) {
completion.complete(currentTimeMs);
}
}
@Override
public void completeAllExceptionally(Throwable exception) {
for (CompletableFuture<Long> completion : thresholdMap.values()) {
completion.completeExceptionally(exception);
}
}
@Override
public int numWaiting() {
return thresholdMap.size();
}
private record ThresholdKey<T extends Comparable<T>>(long id, T threshold) implements Comparable<ThresholdKey<T>> {
@Override
public int compareTo(ThresholdKey<T> o) {
int res = this.threshold.compareTo(o.threshold);
if (res != 0) {
return res;
} else {
return Long.compare(this.id, o.id);
}
}
}
}
|
ThresholdPurgatory
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java
|
{
"start": 2895,
"end": 18679
}
|
class ____ extends MockScriptPlugin {
@Override
protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>();
scripts.put("log(doc['index'].value + (factor * _score))", vars -> scoringScript(vars, ScoreAccessor::doubleValue));
scripts.put("log(doc['index'].value + (factor * _score.intValue()))", vars -> scoringScript(vars, ScoreAccessor::intValue));
scripts.put("log(doc['index'].value + (factor * _score.longValue()))", vars -> scoringScript(vars, ScoreAccessor::longValue));
scripts.put("log(doc['index'].value + (factor * _score.floatValue()))", vars -> scoringScript(vars, ScoreAccessor::floatValue));
scripts.put(
"log(doc['index'].value + (factor * _score.doubleValue()))",
vars -> scoringScript(vars, ScoreAccessor::doubleValue)
);
return scripts;
}
static Double scoringScript(Map<String, Object> vars, Function<ScoreAccessor, Number> scoring) {
Map<?, ?> doc = (Map) vars.get("doc");
Double index = ((Number) ((ScriptDocValues<?>) doc.get("index")).get(0)).doubleValue();
Double score = scoring.apply((ScoreAccessor) vars.get("_score")).doubleValue();
Integer factor = (Integer) vars.get("factor");
return Math.log(index + (factor * score));
}
}
public void testConsistentHitsWithSameSeed() throws Exception {
createIndex("test");
ensureGreen(); // make sure we are done otherwise preference could change?
int docCount = randomIntBetween(100, 200);
for (int i = 0; i < docCount; i++) {
index("test", "" + i, jsonBuilder().startObject().field("foo", i).endObject());
}
flush();
refresh();
int outerIters = scaledRandomIntBetween(10, 20);
for (int o = 0; o < outerIters; o++) {
final int seed = randomInt();
String preference = randomRealisticUnicodeOfLengthBetween(1, 10); // at least one char!!
// randomPreference should not start with '_' (reserved for known preference types (e.g. _shards)
while (preference.startsWith("_")) {
preference = randomRealisticUnicodeOfLengthBetween(1, 10);
}
int innerIters = scaledRandomIntBetween(2, 5);
final SearchHit[][] hits = new SearchHit[1][];
for (int i = 0; i < innerIters; i++) {
final int finalI = i;
assertResponse(
prepareSearch().setSize(docCount) // get all docs otherwise we are prone to tie-breaking
.setPreference(preference)
.setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField("foo"))),
response -> {
assertThat(
"Failures " + Arrays.toString(response.getShardFailures()),
response.getShardFailures().length,
CoreMatchers.equalTo(0)
);
final int hitCount = response.getHits().getHits().length;
final SearchHit[] currentHits = response.getHits().asUnpooled().getHits();
ArrayUtil.timSort(currentHits, (o1, o2) -> {
// for tie-breaking we have to resort here since if the score is
// identical we rely on collection order which might change.
int cmp = Float.compare(o1.getScore(), o2.getScore());
return cmp == 0 ? o1.getId().compareTo(o2.getId()) : cmp;
});
if (finalI == 0) {
assertThat(hits[0], nullValue());
hits[0] = currentHits;
} else {
assertThat(hits[0].length, equalTo(response.getHits().getHits().length));
for (int j = 0; j < hitCount; j++) {
assertThat("" + j, currentHits[j].getScore(), equalTo(hits[0][j].getScore()));
assertThat("" + j, currentHits[j].getId(), equalTo(hits[0][j].getId()));
}
}
}
);
// randomly change some docs to get them in different segments
int numDocsToChange = randomIntBetween(20, 50);
while (numDocsToChange > 0) {
int doc = randomInt(docCount - 1);// watch out this is inclusive the max values!
index("test", "" + doc, jsonBuilder().startObject().field("foo", doc).endObject());
--numDocsToChange;
}
flush();
refresh();
}
}
}
public void testScoreAccessWithinScript() throws Exception {
assertAcked(
prepareCreate("test").setMapping(
"body",
"type=text",
"index",
"type=" + randomFrom("short", "float", "long", "integer", "double")
)
);
int docCount = randomIntBetween(100, 200);
for (int i = 0; i < docCount; i++) {
prepareIndex("test").setId("" + i)
// we add 1 to the index field to make sure that the scripts below never compute log(0)
.setSource("body", randomFrom(Arrays.asList("foo", "bar", "baz")), "index", i + 1)
.get();
}
refresh();
Map<String, Object> params = new HashMap<>();
params.put("factor", randomIntBetween(2, 4));
// Test for accessing _score
Script script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score))", params);
assertNoFailuresAndResponse(
prepareSearch("test").setQuery(
functionScoreQuery(
matchQuery("body", "foo"),
new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
)
),
response -> {
SearchHit firstHit = response.getHits().getAt(0);
assertThat(firstHit.getScore(), greaterThan(1f));
}
);
// Test for accessing _score.intValue()
script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.intValue()))", params);
assertNoFailuresAndResponse(
prepareSearch("test").setQuery(
functionScoreQuery(
matchQuery("body", "foo"),
new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
)
),
response -> {
SearchHit firstHit = response.getHits().getAt(0);
assertThat(firstHit.getScore(), greaterThan(1f));
}
);
// Test for accessing _score.longValue()
script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.longValue()))", params);
assertNoFailuresAndResponse(
prepareSearch("test").setQuery(
functionScoreQuery(
matchQuery("body", "foo"),
new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
)
),
response -> {
SearchHit firstHit = response.getHits().getAt(0);
assertThat(firstHit.getScore(), greaterThan(1f));
}
);
// Test for accessing _score.floatValue()
script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.floatValue()))", params);
assertNoFailuresAndResponse(
prepareSearch("test").setQuery(
functionScoreQuery(
matchQuery("body", "foo"),
new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
)
),
response -> {
SearchHit firstHit = response.getHits().getAt(0);
assertThat(firstHit.getScore(), greaterThan(1f));
}
);
// Test for accessing _score.doubleValue()
script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.doubleValue()))", params);
assertNoFailuresAndResponse(
prepareSearch("test").setQuery(
functionScoreQuery(
matchQuery("body", "foo"),
new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
)
),
response -> {
SearchHit firstHit = response.getHits().getAt(0);
assertThat(firstHit.getScore(), greaterThan(1f));
}
);
}
public void testSeedReportedInExplain() throws Exception {
createIndex("test");
ensureGreen();
index("test", "1", jsonBuilder().startObject().endObject());
flush();
refresh();
int seed = 12345678;
assertNoFailuresAndResponse(
prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField(SeqNoFieldMapper.NAME)))
.setExplain(true),
response -> {
assertNoFailures(response);
assertEquals(1, response.getHits().getTotalHits().value());
SearchHit firstHit = response.getHits().getAt(0);
assertThat(firstHit.getExplanation().toString(), containsString("" + seed));
}
);
}
public void testNoDocs() throws Exception {
createIndex("test");
ensureGreen();
assertNoFailuresAndResponse(
prepareSearch("test").setQuery(
functionScoreQuery(matchAllQuery(), randomFunction().seed(1234).setField(SeqNoFieldMapper.NAME))
),
response -> assertEquals(0, response.getHits().getTotalHits().value())
);
assertNoFailuresAndResponse(
prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), randomFunction())),
response -> assertEquals(0, response.getHits().getTotalHits().value())
);
}
public void testScoreRange() throws Exception {
// all random scores should be in range [0.0, 1.0]
createIndex("test");
ensureGreen();
int docCount = randomIntBetween(100, 200);
for (int i = 0; i < docCount; i++) {
String id = randomRealisticUnicodeOfCodepointLengthBetween(1, 50);
index("test", id, jsonBuilder().startObject().endObject());
}
flush();
refresh();
int iters = scaledRandomIntBetween(10, 20);
for (int i = 0; i < iters; ++i) {
assertNoFailuresAndResponse(
prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), randomFunction())).setSize(docCount),
response -> {
for (SearchHit hit : response.getHits().getHits()) {
assertThat(hit.getScore(), allOf(greaterThanOrEqualTo(0.0f), lessThanOrEqualTo(1.0f)));
}
}
);
}
}
public void testSeeds() throws Exception {
createIndex("test");
ensureGreen();
final int docCount = randomIntBetween(100, 200);
for (int i = 0; i < docCount; i++) {
index("test", "" + i, jsonBuilder().startObject().endObject());
}
flushAndRefresh();
assertNoFailures(
prepareSearch().setSize(docCount) // get all docs otherwise we are prone to tie-breaking
.setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(randomInt()).setField(SeqNoFieldMapper.NAME)))
);
assertNoFailures(
prepareSearch().setSize(docCount) // get all docs otherwise we are prone to tie-breaking
.setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(randomLong()).setField(SeqNoFieldMapper.NAME)))
);
assertNoFailures(
prepareSearch().setSize(docCount) // get all docs otherwise we are prone to tie-breaking
.setQuery(
functionScoreQuery(
matchAllQuery(),
randomFunction().seed(randomRealisticUnicodeOfLengthBetween(10, 20)).setField(SeqNoFieldMapper.NAME)
)
)
);
}
public void checkDistribution() throws Exception {
int count = 10000;
assertAcked(prepareCreate("test"));
ensureGreen();
for (int i = 0; i < count; i++) {
index("test", "" + i, jsonBuilder().startObject().endObject());
}
flush();
refresh();
int[] matrix = new int[count];
for (int i = 0; i < count; i++) {
assertResponse(
prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), new RandomScoreFunctionBuilder())),
response -> matrix[Integer.valueOf(response.getHits().getAt(0).getId())]++
);
}
int filled = 0;
int maxRepeat = 0;
int sumRepeat = 0;
for (int i = 0; i < matrix.length; i++) {
int value = matrix[i];
sumRepeat += value;
maxRepeat = Math.max(maxRepeat, value);
if (value > 0) {
filled++;
}
}
logger.info("max repeat: {}", maxRepeat);
logger.info("avg repeat: {}", sumRepeat / (double) filled);
logger.info("distribution: {}", filled / (double) count);
int percentile50 = filled / 2;
int percentile25 = (filled / 4);
int percentile75 = percentile50 + percentile25;
int sum = 0;
for (int i = 0; i < matrix.length; i++) {
if (matrix[i] == 0) {
continue;
}
sum += i * matrix[i];
if (percentile50 == 0) {
logger.info("median: {}", i);
} else if (percentile25 == 0) {
logger.info("percentile_25: {}", i);
} else if (percentile75 == 0) {
logger.info("percentile_75: {}", i);
}
percentile50--;
percentile25--;
percentile75--;
}
logger.info("mean: {}", sum / (double) count);
}
}
|
CustomScriptPlugin
|
java
|
dropwizard__dropwizard
|
dropwizard-jackson/src/main/java/io/dropwizard/jackson/AnnotationSensitivePropertyNamingStrategy.java
|
{
"start": 728,
"end": 941
}
|
class ____ extends PropertyNamingStrategy {
private static final long serialVersionUID = -1372862028366311230L;
/**
* The snake case naming strategy to use, if a
|
AnnotationSensitivePropertyNamingStrategy
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolver.java
|
{
"start": 1782,
"end": 1840
}
|
class ____ a attribute that matches an ID of the user
*/
|
with
|
java
|
apache__flink
|
flink-streaming-java/src/test/java/org/apache/flink/streaming/api/graph/StreamGraphGeneratorTest.java
|
{
"start": 49189,
"end": 50821
}
|
class ____
extends AbstractStreamOperator<Integer>
implements TwoInputStreamOperator<Integer, Integer, Integer>,
OutputTypeConfigurable<Integer> {
private static final long serialVersionUID = 1L;
TypeInformation<Integer> tpeInformation;
public TypeInformation<Integer> getTypeInformation() {
return tpeInformation;
}
@Override
public void setOutputType(
TypeInformation<Integer> outTypeInfo, ExecutionConfig executionConfig) {
tpeInformation = outTypeInfo;
}
@Override
public void processElement1(StreamRecord<Integer> element) throws Exception {
output.collect(element);
}
@Override
public void processElement2(StreamRecord<Integer> element) throws Exception {
output.collect(element);
}
@Override
public void processWatermark1(Watermark mark) throws Exception {}
@Override
public void processWatermark2(Watermark mark) throws Exception {}
@Override
public void processLatencyMarker1(LatencyMarker latencyMarker) throws Exception {
// ignore
}
@Override
public void processLatencyMarker2(LatencyMarker latencyMarker) throws Exception {
// ignore
}
@Override
protected void setup(
StreamTask<?, ?> containingTask,
StreamConfig config,
Output<StreamRecord<Integer>> output) {}
}
private static
|
OutputTypeConfigurableOperationWithTwoInputs
|
java
|
apache__camel
|
components/camel-jackson/src/main/java/org/apache/camel/component/jackson/transform/JsonStructDataTypeTransformer.java
|
{
"start": 1683,
"end": 3464
}
|
class ____ extends Transformer {
private static final byte[] EMPTY = "{}".getBytes(StandardCharsets.UTF_8);
@Override
public void transform(Message message, DataType fromType, DataType toType) {
if (message.getBody() instanceof JsonNode) {
return;
}
try {
Object unmarshalled;
String contentClass = SchemaHelper.resolveContentClass(message.getExchange(), null);
if (contentClass != null) {
Class<?> contentType
= message.getExchange().getContext().getClassResolver().resolveMandatoryClass(contentClass);
unmarshalled = Json.mapper().reader().forType(JsonNode.class)
.readValue(Json.mapper().writerFor(contentType).writeValueAsString(message.getBody()));
} else {
unmarshalled = Json.mapper().reader().forType(JsonNode.class).readValue(getBodyAsStream(message));
}
message.setBody(unmarshalled);
message.setHeader(Exchange.CONTENT_TYPE, MimeType.STRUCT.type());
} catch (InvalidPayloadException | IOException | ClassNotFoundException e) {
throw new CamelExecutionException("Failed to apply Json input data type on exchange", message.getExchange(), e);
}
}
private InputStream getBodyAsStream(Message message) throws InvalidPayloadException {
if (message.getBody() == null) {
return new ByteArrayInputStream(EMPTY);
}
InputStream bodyStream = message.getBody(InputStream.class);
if (bodyStream == null) {
bodyStream = new ByteArrayInputStream(message.getMandatoryBody(byte[].class));
}
return bodyStream;
}
}
|
JsonStructDataTypeTransformer
|
java
|
spring-projects__spring-boot
|
module/spring-boot-web-server/src/test/java/org/springframework/boot/web/server/servlet/context/testcomponents/filter/TestFilter.java
|
{
"start": 1033,
"end": 1438
}
|
class ____ implements Filter {
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
request.setAttribute("filterAttribute", "bravo");
chain.doFilter(request, response);
}
@Override
public void destroy() {
}
}
|
TestFilter
|
java
|
quarkusio__quarkus
|
extensions/elytron-security-ldap/runtime/src/main/java/io/quarkus/elytron/security/ldap/config/CacheConfig.java
|
{
"start": 189,
"end": 601
}
|
interface ____ {
/**
* If set to true, request to the LDAP server are cached
*/
@WithDefault("false")
boolean enabled();
/**
* The duration that an entry can stay in the cache
*/
@WithDefault("60s")
Duration maxAge();
/**
* The maximum number of entries to keep in the cache
*/
@WithDefault("100")
int size();
String toString();
}
|
CacheConfig
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/event/collection/CollectionListeners.java
|
{
"start": 4153,
"end": 7507
}
|
class ____ extends AbstractListener
implements PostCollectionUpdateEventListener {
private PostCollectionUpdateListener(CollectionListeners listeners) {
super( listeners );
}
public void onPostUpdateCollection(PostCollectionUpdateEvent event) {
addEvent( event, this );
}
}
private final PreCollectionRecreateListener preCollectionRecreateListener;
private final InitializeCollectionListener initializeCollectionListener;
private final PreCollectionRemoveListener preCollectionRemoveListener;
private final PreCollectionUpdateListener preCollectionUpdateListener;
private final PostCollectionRecreateListener postCollectionRecreateListener;
private final PostCollectionRemoveListener postCollectionRemoveListener;
private final PostCollectionUpdateListener postCollectionUpdateListener;
private List listenersCalled = new ArrayList();
private List events = new ArrayList();
public CollectionListeners( SessionFactory sf) {
preCollectionRecreateListener = new PreCollectionRecreateListener( this );
initializeCollectionListener = new InitializeCollectionListener( this );
preCollectionRemoveListener = new PreCollectionRemoveListener( this );
preCollectionUpdateListener = new PreCollectionUpdateListener( this );
postCollectionRecreateListener = new PostCollectionRecreateListener( this );
postCollectionRemoveListener = new PostCollectionRemoveListener( this );
postCollectionUpdateListener = new PostCollectionUpdateListener( this );
EventListenerRegistry registry = ( (SessionFactoryImplementor) sf ).getEventListenerRegistry();
registry.setListeners( EventType.INIT_COLLECTION, initializeCollectionListener );
registry.setListeners( EventType.PRE_COLLECTION_RECREATE, preCollectionRecreateListener );
registry.setListeners( EventType.POST_COLLECTION_RECREATE, postCollectionRecreateListener );
registry.setListeners( EventType.PRE_COLLECTION_REMOVE, preCollectionRemoveListener );
registry.setListeners( EventType.POST_COLLECTION_REMOVE, postCollectionRemoveListener );
registry.setListeners( EventType.PRE_COLLECTION_UPDATE, preCollectionUpdateListener );
registry.setListeners( EventType.POST_COLLECTION_UPDATE, postCollectionUpdateListener );
}
public void addEvent(AbstractCollectionEvent event, Listener listener) {
listenersCalled.add( listener );
events.add( event );
}
public List getListenersCalled() {
return listenersCalled;
}
public List getEvents() {
return events;
}
public void clear() {
listenersCalled.clear();
events.clear();
}
public PreCollectionRecreateListener getPreCollectionRecreateListener() { return preCollectionRecreateListener; }
public InitializeCollectionListener getInitializeCollectionListener() { return initializeCollectionListener; }
public PreCollectionRemoveListener getPreCollectionRemoveListener() { return preCollectionRemoveListener; }
public PreCollectionUpdateListener getPreCollectionUpdateListener() { return preCollectionUpdateListener; }
public PostCollectionRecreateListener getPostCollectionRecreateListener() { return postCollectionRecreateListener; }
public PostCollectionRemoveListener getPostCollectionRemoveListener() { return postCollectionRemoveListener; }
public PostCollectionUpdateListener getPostCollectionUpdateListener() { return postCollectionUpdateListener; }
}
|
PostCollectionUpdateListener
|
java
|
spring-projects__spring-framework
|
spring-jdbc/src/main/java/org/springframework/jdbc/datasource/ConnectionHandle.java
|
{
"start": 945,
"end": 1362
}
|
interface ____ {
/**
* Fetch the JDBC Connection that this handle refers to.
*/
Connection getConnection();
/**
* Release the JDBC Connection that this handle refers to.
* <p>The default implementation is empty, assuming that the lifecycle
* of the connection is managed externally.
* @param con the JDBC Connection to release
*/
default void releaseConnection(Connection con) {
}
}
|
ConnectionHandle
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/BeforeAndAfterAllTests.java
|
{
"start": 6431,
"end": 6835
}
|
class ____ extends TopLevelTestCase {
@BeforeAll
static void beforeAll2() {
callSequence.add("beforeAllMethod-2");
}
@AfterAll
static void afterAll2() {
callSequence.add("afterAllMethod-2");
}
@Test
@Override
void test() {
callSequence.add("test-2");
}
}
@SuppressWarnings("JUnitMalformedDeclaration")
@ExtendWith(QuuxClassLevelCallbacks.class)
static
|
SecondLevelTestCase
|
java
|
google__guice
|
extensions/servlet/src/com/google/inject/servlet/ManagedServletPipeline.java
|
{
"start": 1533,
"end": 6774
}
|
class ____ {
private final ServletDefinition[] servletDefinitions;
private static final TypeLiteral<ServletDefinition> SERVLET_DEFS =
TypeLiteral.get(ServletDefinition.class);
@Inject
public ManagedServletPipeline(Injector injector) {
this.servletDefinitions = collectServletDefinitions(injector);
}
boolean hasServletsMapped() {
return servletDefinitions.length > 0;
}
/**
* Introspects the injector and collects all instances of bound {@code List<ServletDefinition>}
* into a master list.
*
* <p>We have a guarantee that {@link com.google.inject.Injector#getBindings()} returns a map that
* preserves insertion order in entry-set iterators.
*/
private ServletDefinition[] collectServletDefinitions(Injector injector) {
List<ServletDefinition> servletDefinitions = Lists.newArrayList();
for (Binding<ServletDefinition> entry : injector.findBindingsByType(SERVLET_DEFS)) {
servletDefinitions.add(entry.getProvider().get());
}
// Copy to a fixed size array for speed.
return servletDefinitions.toArray(new ServletDefinition[servletDefinitions.size()]);
}
public void init(ServletContext servletContext, Injector injector) throws ServletException {
Set<HttpServlet> initializedSoFar = Sets.newIdentityHashSet();
for (ServletDefinition servletDefinition : servletDefinitions) {
servletDefinition.init(servletContext, injector, initializedSoFar);
}
}
public boolean service(ServletRequest request, ServletResponse response)
throws IOException, ServletException {
//stop at the first matching servlet and service
for (ServletDefinition servletDefinition : servletDefinitions) {
if (servletDefinition.service(request, response)) {
return true;
}
}
//there was no match...
return false;
}
public void destroy() {
Set<HttpServlet> destroyedSoFar = Sets.newIdentityHashSet();
for (ServletDefinition servletDefinition : servletDefinitions) {
servletDefinition.destroy(destroyedSoFar);
}
}
/**
* @return Returns a request dispatcher wrapped with a servlet mapped to the given path or null if
* no mapping was found.
*/
RequestDispatcher getRequestDispatcher(String path) {
final String newRequestUri = path;
// TODO(user): check servlet spec to see if the following is legal or not.
// Need to strip query string if requested...
for (final ServletDefinition servletDefinition : servletDefinitions) {
if (servletDefinition.shouldServe(path)) {
return new RequestDispatcher() {
@Override
public void forward(ServletRequest servletRequest, ServletResponse servletResponse)
throws ServletException, IOException {
Preconditions.checkState(
!servletResponse.isCommitted(),
"Response has been committed--you can only call forward before"
+ " committing the response (hint: don't flush buffers)");
// clear buffer before forwarding
servletResponse.resetBuffer();
ServletRequest requestToProcess;
if (servletRequest instanceof HttpServletRequest) {
requestToProcess = wrapRequest((HttpServletRequest) servletRequest, newRequestUri);
} else {
// This should never happen, but instead of throwing an exception
// we will allow a happy case pass thru for maximum tolerance to
// legacy (and internal) code.
requestToProcess = servletRequest;
}
// now dispatch to the servlet
doServiceImpl(servletDefinition, requestToProcess, servletResponse);
}
@Override
public void include(ServletRequest servletRequest, ServletResponse servletResponse)
throws ServletException, IOException {
// route to the target servlet
doServiceImpl(servletDefinition, servletRequest, servletResponse);
}
private void doServiceImpl(
ServletDefinition servletDefinition,
ServletRequest servletRequest,
ServletResponse servletResponse)
throws ServletException, IOException {
servletRequest.setAttribute(REQUEST_DISPATCHER_REQUEST, Boolean.TRUE);
try {
servletDefinition.doService(servletRequest, servletResponse);
} finally {
servletRequest.removeAttribute(REQUEST_DISPATCHER_REQUEST);
}
}
};
}
}
//otherwise, can't process
return null;
}
// visible for testing
static HttpServletRequest wrapRequest(HttpServletRequest request, String newUri) {
return new RequestDispatcherRequestWrapper(request, newUri);
}
/**
* A Marker constant attribute that when present in the request indicates to Guice servlet that
* this request has been generated by a request dispatcher rather than the servlet pipeline. In
* accordance with section 8.4.2 of the Servlet 2.4 specification.
*/
public static final String REQUEST_DISPATCHER_REQUEST = "jakarta.servlet.forward.servlet_path";
private static
|
ManagedServletPipeline
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/keygen/NpeCountry.java
|
{
"start": 717,
"end": 1788
}
|
class ____ {
private Integer id;
private String countryname;
private String countrycode;
public NpeCountry() {
}
public NpeCountry(String countryname, String countrycode) {
this.countryname = countryname;
this.countrycode = countrycode;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getCountryname() {
return countryname;
}
public void setCountryname(String countryname) {
this.countryname = countryname;
}
public String getCountrycode() {
return countrycode;
}
public void setCountrycode(String countrycode) {
this.countrycode = countrycode;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
NpeCountry other = (NpeCountry) o;
// throws NPE when id is null
return id.equals(other.id);
}
@Override
public int hashCode() {
// throws NPE when id is null
return id.hashCode();
}
}
|
NpeCountry
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
|
{
"start": 5884,
"end": 21863
}
|
class ____ {
final int socketTimeout;
final int socketWriteTimeout;
final int socketKeepaliveTimeout;
final int ecChecksumSocketTimeout;
private final int transferSocketSendBufferSize;
private final int transferSocketRecvBufferSize;
private final boolean tcpNoDelay;
final boolean transferToAllowed;
final boolean dropCacheBehindWrites;
final boolean syncBehindWrites;
final boolean syncBehindWritesInBackground;
final boolean dropCacheBehindReads;
final boolean syncOnClose;
final boolean encryptDataTransfer;
final boolean connectToDnViaHostname;
final boolean overwriteDownstreamDerivedQOP;
private final boolean pmemCacheRecoveryEnabled;
final long readaheadLength;
final long heartBeatInterval;
private final long lifelineIntervalMs;
volatile long blockReportInterval;
volatile long blockReportSplitThreshold;
volatile boolean peerStatsEnabled;
volatile boolean diskStatsEnabled;
volatile long outliersReportIntervalMs;
final long ibrInterval;
volatile long initialBlockReportDelayMs;
volatile long cacheReportInterval;
private volatile long datanodeSlowIoWarningThresholdMs;
final String minimumNameNodeVersion;
final String encryptionAlgorithm;
final SaslPropertiesResolver saslPropsResolver;
final TrustedChannelResolver trustedChannelResolver;
private final boolean ignoreSecurePortsForTesting;
final long xceiverStopTimeout;
final long restartReplicaExpiry;
private final long processCommandsThresholdMs;
final long maxLockedMemory;
private final String[] pmemDirs;
private final long bpReadyTimeout;
// Allow LAZY_PERSIST writes from non-local clients?
private final boolean allowNonLocalLazyPersist;
private final int volFailuresTolerated;
private final int volsConfigured;
private final int maxDataLength;
private Configurable dn;
public DNConf(final Configurable dn) {
this.dn = dn;
socketTimeout = getConf().getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
HdfsConstants.READ_TIMEOUT);
socketWriteTimeout = getConf().getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
HdfsConstants.WRITE_TIMEOUT);
socketKeepaliveTimeout = getConf().getInt(
DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
ecChecksumSocketTimeout = getConf().getInt(
DFS_CHECKSUM_EC_SOCKET_TIMEOUT_KEY,
DFS_CHECKSUM_EC_SOCKET_TIMEOUT_DEFAULT);
this.transferSocketSendBufferSize = getConf().getInt(
DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_KEY,
DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_DEFAULT);
this.transferSocketRecvBufferSize = getConf().getInt(
DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_KEY,
DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_DEFAULT);
this.tcpNoDelay = getConf().getBoolean(
DFSConfigKeys.DFS_DATA_TRANSFER_SERVER_TCPNODELAY,
DFSConfigKeys.DFS_DATA_TRANSFER_SERVER_TCPNODELAY_DEFAULT);
/* Based on results on different platforms, we might need set the default
* to false on some of them. */
transferToAllowed = getConf().getBoolean(
DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT);
readaheadLength = getConf().getLong(
HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
maxDataLength = getConf().getInt(DFSConfigKeys.IPC_MAXIMUM_DATA_LENGTH,
DFSConfigKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
dropCacheBehindWrites = getConf().getBoolean(
DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY,
DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT);
syncBehindWrites = getConf().getBoolean(
DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_KEY,
DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_DEFAULT);
syncBehindWritesInBackground = getConf().getBoolean(
DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_KEY,
DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_DEFAULT);
dropCacheBehindReads = getConf().getBoolean(
DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY,
DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT);
connectToDnViaHostname = getConf().getBoolean(
DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
this.blockReportInterval = getConf().getLong(
DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
this.peerStatsEnabled = getConf().getBoolean(
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY,
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_DEFAULT);
this.diskStatsEnabled = Util.isDiskStatsEnabled(getConf().getInt(
DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
DFSConfigKeys.
DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT));
this.outliersReportIntervalMs = getConf().getTimeDuration(
DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
this.ibrInterval = getConf().getLong(
DFSConfigKeys.DFS_BLOCKREPORT_INCREMENTAL_INTERVAL_MSEC_KEY,
DFSConfigKeys.DFS_BLOCKREPORT_INCREMENTAL_INTERVAL_MSEC_DEFAULT);
this.blockReportSplitThreshold = getConf().getLong(
DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY,
DFS_BLOCKREPORT_SPLIT_THRESHOLD_DEFAULT);
this.cacheReportInterval = getConf().getLong(
DFS_CACHEREPORT_INTERVAL_MSEC_KEY,
DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT);
this.datanodeSlowIoWarningThresholdMs = getConf().getLong(
DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY,
DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
initBlockReportDelay();
heartBeatInterval = getConf().getTimeDuration(DFS_HEARTBEAT_INTERVAL_KEY,
DFS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS,
TimeUnit.MILLISECONDS);
long confLifelineIntervalMs =
getConf().getTimeDuration(DFS_DATANODE_LIFELINE_INTERVAL_SECONDS_KEY,
3 * getConf().getTimeDuration(DFS_HEARTBEAT_INTERVAL_KEY,
DFS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS),
TimeUnit.SECONDS, TimeUnit.MILLISECONDS);
if (confLifelineIntervalMs <= heartBeatInterval) {
confLifelineIntervalMs = 3 * heartBeatInterval;
DataNode.LOG.warn(
String.format("%s must be set to a value greater than %s. " +
"Resetting value to 3 * %s, which is %d milliseconds.",
DFS_DATANODE_LIFELINE_INTERVAL_SECONDS_KEY,
DFS_HEARTBEAT_INTERVAL_KEY, DFS_HEARTBEAT_INTERVAL_KEY,
confLifelineIntervalMs));
}
lifelineIntervalMs = confLifelineIntervalMs;
// do we need to sync block file contents to disk when blockfile is closed?
this.syncOnClose = getConf().getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY,
DFS_DATANODE_SYNCONCLOSE_DEFAULT);
this.minimumNameNodeVersion = getConf().get(
DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY,
DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT);
this.encryptDataTransfer = getConf().getBoolean(
DFS_ENCRYPT_DATA_TRANSFER_KEY,
DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
this.overwriteDownstreamDerivedQOP = getConf().getBoolean(
DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_DERIVED_QOP_KEY,
DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_DERIVED_QOP_DEFAULT);
this.encryptionAlgorithm = getConf().get(DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
this.trustedChannelResolver = TrustedChannelResolver.getInstance(getConf());
this.saslPropsResolver = DataTransferSaslUtil.getSaslPropertiesResolver(
getConf());
this.ignoreSecurePortsForTesting = getConf().getBoolean(
IGNORE_SECURE_PORTS_FOR_TESTING_KEY,
IGNORE_SECURE_PORTS_FOR_TESTING_DEFAULT);
this.xceiverStopTimeout = getConf().getLong(
DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,
DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
this.maxLockedMemory = getConf().getLongBytes(
DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);
this.pmemDirs = getConf().getTrimmedStrings(
DFS_DATANODE_PMEM_CACHE_DIRS_KEY);
this.restartReplicaExpiry = getConf().getLong(
DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY,
DFS_DATANODE_RESTART_REPLICA_EXPIRY_DEFAULT) * 1000L;
this.allowNonLocalLazyPersist = getConf().getBoolean(
DFS_DATANODE_NON_LOCAL_LAZY_PERSIST,
DFS_DATANODE_NON_LOCAL_LAZY_PERSIST_DEFAULT);
this.bpReadyTimeout = getConf().getTimeDuration(
DFS_DATANODE_BP_READY_TIMEOUT_KEY,
DFS_DATANODE_BP_READY_TIMEOUT_DEFAULT, TimeUnit.SECONDS);
this.volFailuresTolerated =
getConf().getInt(
DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);
String[] dataDirs =
getConf().getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
this.volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
this.pmemCacheRecoveryEnabled = getConf().getBoolean(
DFS_DATANODE_PMEM_CACHE_RECOVERY_KEY,
DFS_DATANODE_PMEM_CACHE_RECOVERY_DEFAULT);
this.processCommandsThresholdMs = getConf().getTimeDuration(
DFS_DATANODE_PROCESS_COMMANDS_THRESHOLD_KEY,
DFS_DATANODE_PROCESS_COMMANDS_THRESHOLD_DEFAULT,
TimeUnit.MILLISECONDS
);
}
private void initBlockReportDelay() {
long initBRDelay = getConf().getTimeDuration(
DFS_BLOCKREPORT_INITIAL_DELAY_KEY,
DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT, TimeUnit.SECONDS, TimeUnit.MILLISECONDS);
if (initBRDelay >= blockReportInterval || initBRDelay < 0) {
initBRDelay = 0;
DataNode.LOG.info(DFS_BLOCKREPORT_INITIAL_DELAY_KEY +
" is greater than or equal to " + DFS_BLOCKREPORT_INTERVAL_MSEC_KEY +
". Setting initial delay to 0 msec.");
}
initialBlockReportDelayMs = initBRDelay;
}
// We get minimumNameNodeVersion via a method so it can be mocked out in tests.
String getMinimumNameNodeVersion() {
return this.minimumNameNodeVersion;
}
/**
* Returns the configuration.
*
* @return Configuration the configuration
*/
public Configuration getConf() {
return this.dn.getConf();
}
/**
* Returns true if encryption enabled for DataTransferProtocol.
*
* @return boolean true if encryption enabled for DataTransferProtocol
*/
public boolean getEncryptDataTransfer() {
return encryptDataTransfer;
}
/**
* Returns encryption algorithm configured for DataTransferProtocol, or null
* if not configured.
*
* @return encryption algorithm configured for DataTransferProtocol
*/
public String getEncryptionAlgorithm() {
return encryptionAlgorithm;
}
public long getXceiverStopTimeout() {
return xceiverStopTimeout;
}
public long getMaxLockedMemory() {
return maxLockedMemory;
}
/**
* Returns true if connect to datanode via hostname
*
* @return boolean true if connect to datanode via hostname
*/
public boolean getConnectToDnViaHostname() {
return connectToDnViaHostname;
}
/**
* Returns socket timeout
*
* @return int socket timeout
*/
public int getSocketTimeout() {
return socketTimeout;
}
/**
* Returns socket write timeout
*
* @return int socket write timeout
*/
public int getSocketWriteTimeout() {
return socketWriteTimeout;
}
/**
* Returns socket timeout for computing the checksum of EC blocks
*
* @return int socket timeout
*/
public int getEcChecksumSocketTimeout() {
return ecChecksumSocketTimeout;
}
/**
* Returns the SaslPropertiesResolver configured for use with
* DataTransferProtocol, or null if not configured.
*
* @return SaslPropertiesResolver configured for use with DataTransferProtocol
*/
public SaslPropertiesResolver getSaslPropsResolver() {
return saslPropsResolver;
}
/**
* Returns the TrustedChannelResolver configured for use with
* DataTransferProtocol, or null if not configured.
*
* @return TrustedChannelResolver configured for use with DataTransferProtocol
*/
public TrustedChannelResolver getTrustedChannelResolver() {
return trustedChannelResolver;
}
/**
* Returns true if configuration is set to skip checking for proper
* port configuration in a secured cluster. This is only intended for use in
* dev testing.
*
* @return true if configured to skip checking secured port configuration
*/
public boolean getIgnoreSecurePortsForTesting() {
return ignoreSecurePortsForTesting;
}
public boolean getAllowNonLocalLazyPersist() {
return allowNonLocalLazyPersist;
}
public int getTransferSocketRecvBufferSize() {
return transferSocketRecvBufferSize;
}
public int getTransferSocketSendBufferSize() {
return transferSocketSendBufferSize;
}
public boolean getDataTransferServerTcpNoDelay() {
return tcpNoDelay;
}
public long getBpReadyTimeout() {
return bpReadyTimeout;
}
/**
* Returns the interval in milliseconds between sending lifeline messages.
*
* @return interval in milliseconds between sending lifeline messages
*/
public long getLifelineIntervalMs() {
return lifelineIntervalMs;
}
public int getVolFailuresTolerated() {
return volFailuresTolerated;
}
public int getVolsConfigured() {
return volsConfigured;
}
public long getSlowIoWarningThresholdMs() {
return datanodeSlowIoWarningThresholdMs;
}
int getMaxDataLength() {
return maxDataLength;
}
public String[] getPmemVolumes() {
return pmemDirs;
}
public boolean getPmemCacheRecoveryEnabled() {
return pmemCacheRecoveryEnabled;
}
public long getProcessCommandsThresholdMs() {
return processCommandsThresholdMs;
}
void setBlockReportInterval(long intervalMs) {
Preconditions.checkArgument(intervalMs > 0,
DFS_BLOCKREPORT_INTERVAL_MSEC_KEY + " should be larger than 0");
blockReportInterval = intervalMs;
}
public long getBlockReportInterval() {
return blockReportInterval;
}
void setCacheReportInterval(long intervalMs) {
Preconditions.checkArgument(intervalMs > 0,
DFS_CACHEREPORT_INTERVAL_MSEC_KEY + " should be larger than 0");
cacheReportInterval = intervalMs;
}
public long getCacheReportInterval() {
return cacheReportInterval;
}
void setBlockReportSplitThreshold(long threshold) {
Preconditions.checkArgument(threshold >= 0,
DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY + " should be larger than or equal to 0");
blockReportSplitThreshold = threshold;
}
void setInitBRDelayMs(String delayMs) {
dn.getConf().set(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, delayMs);
initBlockReportDelay();
}
void setPeerStatsEnabled(boolean enablePeerStats) {
peerStatsEnabled = enablePeerStats;
}
public void setFileIoProfilingSamplingPercentage(int samplingPercentage) {
diskStatsEnabled = Util.isDiskStatsEnabled(samplingPercentage);
}
public void setOutliersReportIntervalMs(String reportIntervalMs) {
dn.getConf().set(DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY, reportIntervalMs);
outliersReportIntervalMs = getConf().getTimeDuration(
DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
}
public void setDatanodeSlowIoWarningThresholdMs(long threshold) {
Preconditions.checkArgument(threshold > 0,
DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY + " should be greater than 0");
datanodeSlowIoWarningThresholdMs = threshold;
}
}
|
DNConf
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
|
{
"start": 2690,
"end": 20055
}
|
class ____ {
private static final NodeId nodeId = NodeId.newInstance("somehost", 5);
private Configuration conf = new Configuration();
@AfterEach
public void tearDown() {
QueueMetrics.clearQueueMetrics();
DefaultMetricsSystem.shutdown();
}
@Test
public void testActiveUsersWhenMove() {
final String user = "user1";
Queue parentQueue = createQueue("parent", null);
Queue queue1 = createQueue("queue1", parentQueue);
Queue queue2 = createQueue("queue2", parentQueue);
Queue queue3 = createQueue("queue3", parentQueue);
ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
RMContext rmContext = mock(RMContext.class);
when(rmContext.getEpoch()).thenReturn(3L);
when(rmContext.getYarnConfiguration()).thenReturn(conf);
SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(appAttId,
user, queue1, queue1.getAbstractUsersManager(), rmContext);
// Resource request
Resource requestedResource = Resource.newInstance(1536, 2);
Priority requestedPriority = Priority.newInstance(2);
ResourceRequest request = ResourceRequest.newInstance(requestedPriority,
ResourceRequest.ANY, requestedResource, 1);
app.updateResourceRequests(Arrays.asList(request));
assertEquals(1, queue1.getAbstractUsersManager().getNumActiveUsers());
// move app from queue1 to queue2
app.move(queue2);
// Active user count has to decrease from queue1
assertEquals(0, queue1.getAbstractUsersManager().getNumActiveUsers());
// Increase the active user count in queue2 if the moved app has pending requests
assertEquals(1, queue2.getAbstractUsersManager().getNumActiveUsers());
// Allocated container
RMContainer container1 = createRMContainer(appAttId, 1, requestedResource);
app.liveContainers.put(container1.getContainerId(), container1);
SchedulerNode node = createNode();
app.appSchedulingInfo.allocate(NodeType.OFF_SWITCH, node,
toSchedulerKey(requestedPriority), container1);
// Active user count has to decrease from queue2 due to app has NO pending requests
assertEquals(0, queue2.getAbstractUsersManager().getNumActiveUsers());
// move app from queue2 to queue3
app.move(queue3);
// Active user count in queue3 stays same if the moved app has NO pending requests
assertEquals(0, queue3.getAbstractUsersManager().getNumActiveUsers());
}
/**
 * Verifies that moving an application attempt between two sibling queues
 * transfers all of its accounting — active/running app counts, allocated,
 * reserved, and pending resources — from the old queue's metrics to the new
 * queue's metrics, while the shared parent queue's metrics stay unchanged.
 */
@Test
public void testMove() {
final String user = "user1";
// Two sibling queues under a common parent; the app moves old -> new.
Queue parentQueue = createQueue("parent", null);
Queue oldQueue = createQueue("old", parentQueue);
Queue newQueue = createQueue("new", parentQueue);
QueueMetrics parentMetrics = parentQueue.getMetrics();
QueueMetrics oldMetrics = oldQueue.getMetrics();
QueueMetrics newMetrics = newQueue.getMetrics();
ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
RMContext rmContext = mock(RMContext.class);
// Epoch 3 ends up in the high bits of generated container IDs (see below).
when(rmContext.getEpoch()).thenReturn(3L);
when(rmContext.getYarnConfiguration()).thenReturn(conf);
SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(appAttId,
user, oldQueue, oldQueue.getAbstractUsersManager(), rmContext);
app.appSchedulingInfo.setUnmanagedAM(false);
oldMetrics.submitApp(user, false);
// confirm that containerId is calculated based on epoch.
assertEquals(0x30000000001L, app.getNewContainerId());
// Resource request
Resource requestedResource = Resource.newInstance(1536, 2);
Priority requestedPriority = Priority.newInstance(2);
ResourceRequest request = ResourceRequest.newInstance(requestedPriority,
ResourceRequest.ANY, requestedResource, 3);
app.updateResourceRequests(Arrays.asList(request));
// Allocated container
RMContainer container1 = createRMContainer(appAttId, 1, requestedResource);
app.liveContainers.put(container1.getContainerId(), container1);
SchedulerNode node = createNode();
app.appSchedulingInfo.allocate(NodeType.OFF_SWITCH, node,
toSchedulerKey(requestedPriority), container1);
// Reserved container
Priority prio1 = Priority.newInstance(1);
Resource reservedResource = Resource.newInstance(2048, 3);
RMContainer container2 = createReservedRMContainer(appAttId, 1, reservedResource,
node.getNodeID(), prio1);
Map<NodeId, RMContainer> reservations = new HashMap<NodeId, RMContainer>();
reservations.put(node.getNodeID(), container2);
app.reservedContainers.put(toSchedulerKey(prio1), reservations);
oldMetrics.reserveResource(container2.getNodeLabelExpression(),
user, reservedResource);
// Before the move: the old queue carries the app (1 active, 1 running),
// the allocation (1536MB/2 vcores), the reservation (2048MB/3 vcores),
// and the remaining pending ask (2 x 1536MB = 3072MB / 4 vcores).
checkQueueMetrics(oldMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4);
checkQueueMetrics(newMetrics, 0, 0, 0, 0, 0, 0, 0, 0);
checkQueueMetrics(parentMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4);
app.move(newQueue);
// After the move: everything migrated to the new queue; the common parent
// is unaffected since both queues roll up to it.
checkQueueMetrics(oldMetrics, 0, 0, 0, 0, 0, 0, 0, 0);
checkQueueMetrics(newMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4);
checkQueueMetrics(parentMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4);
}
/**
 * Asserts that the given {@link QueueMetrics} match the expected values for
 * every tracked dimension.
 *
 * @param metrics        metrics under test
 * @param activeApps     expected number of active applications
 * @param runningApps    expected number of running applications
 * @param allocMb        expected allocated memory in MB
 * @param allocVcores    expected allocated virtual cores
 * @param reservedMb     expected reserved memory in MB
 * @param reservedVcores expected reserved virtual cores
 * @param pendingMb      expected pending memory in MB
 * @param pendingVcores  expected pending virtual cores
 */
private void checkQueueMetrics(QueueMetrics metrics, int activeApps,
int runningApps, int allocMb, int allocVcores, int reservedMb,
int reservedVcores, int pendingMb, int pendingVcores) {
assertEquals(activeApps, metrics.getActiveApps());
assertEquals(runningApps, metrics.getAppsRunning());
assertEquals(allocMb, metrics.getAllocatedMB());
assertEquals(allocVcores, metrics.getAllocatedVirtualCores());
assertEquals(reservedMb, metrics.getReservedMB());
assertEquals(reservedVcores, metrics.getReservedVirtualCores());
assertEquals(pendingMb, metrics.getPendingMB());
assertEquals(pendingVcores, metrics.getPendingVirtualCores());
}
/**
 * Builds a mock {@link SchedulerNode} with a fixed host name, rack name,
 * and the test's shared node ID.
 */
private SchedulerNode createNode() {
SchedulerNode mockNode = mock(SchedulerNode.class);
when(mockNode.getNodeID()).thenReturn(nodeId);
when(mockNode.getNodeName()).thenReturn("somehost");
when(mockNode.getRackName()).thenReturn("somerack");
return mockNode;
}
/**
 * Builds a mock reserved {@link RMContainer}: a plain mock container whose
 * reservation accessors (node, resource, scheduler key) are also stubbed.
 */
private RMContainer createReservedRMContainer(ApplicationAttemptId appAttId,
int id, Resource resource, NodeId nodeId, Priority reservedPriority) {
RMContainer reservedContainer = createRMContainer(appAttId, id, resource);
when(reservedContainer.getReservedNode()).thenReturn(nodeId);
when(reservedContainer.getReservedResource()).thenReturn(resource);
when(reservedContainer.getReservedSchedulerKey())
.thenReturn(toSchedulerKey(reservedPriority));
return reservedContainer;
}
/**
 * Builds a mock {@link RMContainer} backed by a mock {@link Container} that
 * reports the given resource and the test's shared node ID, and exposes a
 * container ID derived from the attempt ID and the given sequence number.
 */
private RMContainer createRMContainer(ApplicationAttemptId appAttId, int id,
Resource resource) {
Container mockContainer = mock(Container.class);
when(mockContainer.getNodeId()).thenReturn(nodeId);
when(mockContainer.getResource()).thenReturn(resource);
RMContainer mockRmContainer = mock(RMContainerImpl.class);
when(mockRmContainer.getContainer()).thenReturn(mockContainer);
when(mockRmContainer.getContainerId())
.thenReturn(ContainerId.newContainerId(appAttId, id));
return mockRmContainer;
}
/** Creates a mock queue with the default (full, 1.0) capacity. */
private Queue createQueue(String name, Queue parent) {
return createQueue(name, parent, 1.0f);
}
/**
 * Creates a mock {@link Queue} named {@code name} under {@code parent} with
 * the given capacity, wired to real {@link QueueMetrics} and a real
 * {@link ActiveUsersManager} so metric updates behave as in production.
 */
private Queue createQueue(String name, Queue parent, float capacity) {
QueueMetrics queueMetrics = QueueMetrics.forQueue(name, parent, false, conf);
ActiveUsersManager usersManager = new ActiveUsersManager(queueMetrics);
QueueInfo queueInfo = QueueInfo.newInstance(name,
"root." + name, capacity, 1.0f, 0, null,
null, QueueState.RUNNING, null, "", null, false, -1.0f, 10, null, false);
Queue mockQueue = mock(Queue.class);
when(mockQueue.getMetrics()).thenReturn(queueMetrics);
when(mockQueue.getAbstractUsersManager()).thenReturn(usersManager);
when(mockQueue.getQueueInfo(false, false)).thenReturn(queueInfo);
return mockQueue;
}
/**
 * Builds an {@link ApplicationAttemptId} for cluster timestamp 0 with the
 * given application and attempt sequence numbers.
 */
private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) {
return ApplicationAttemptId.newInstance(
ApplicationId.newInstance(0, appId), attemptId);
}
/**
 * Verifies the queue and cluster usage percentages reported by
 * {@code getResourceUsageReport()} for queues of different capacities,
 * including a queue whose capacity is effectively zero.
 */
@Test
public void testAppPercentages() throws Exception {
FifoScheduler scheduler = mock(FifoScheduler.class);
// Cluster of 10 GB / 10 vcores; DefaultResourceCalculator -> memory only.
when(scheduler.getClusterResource())
.thenReturn(Resource.newInstance(10 * 1024, 10));
when(scheduler.getResourceCalculator())
.thenReturn(new DefaultResourceCalculator());
ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
RMContext rmContext = mock(RMContext.class);
when(rmContext.getEpoch()).thenReturn(3L);
when(rmContext.getScheduler()).thenReturn(scheduler);
when(rmContext.getYarnConfiguration()).thenReturn(conf);
final String user = "user1";
Queue queue = createQueue("test", null);
SchedulerApplicationAttempt app =
new SchedulerApplicationAttempt(appAttId, user, queue,
queue.getAbstractUsersManager(), rmContext);
// Resource request
Resource requestedResource = Resource.newInstance(1536, 2);
app.attemptResourceUsage.incUsed(requestedResource);
// 1536MB of the 10240MB cluster = 15%; queue capacity is 1.0, so the
// queue percentage matches the cluster percentage.
assertEquals(15.0f, app.getResourceUsageReport().getQueueUsagePercentage(),
0.01f);
assertEquals(15.0f,
app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
// Half-capacity queue: the same usage is twice as large a share of the
// queue (30%) while the cluster share stays 15%.
queue = createQueue("test2", null, 0.5f);
app = new SchedulerApplicationAttempt(appAttId, user, queue,
queue.getAbstractUsersManager(), rmContext);
app.attemptResourceUsage.incUsed(requestedResource);
assertEquals(30.0f, app.getResourceUsageReport().getQueueUsagePercentage(),
0.01f);
assertEquals(15.0f,
app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
// Usage grows to 4 x 1536MB = 6144MB: 60% of the cluster and 120% of the
// half-capacity queue — queue usage may legitimately exceed 100%.
app.attemptResourceUsage.incUsed(requestedResource);
app.attemptResourceUsage.incUsed(requestedResource);
app.attemptResourceUsage.incUsed(requestedResource);
assertEquals(120.0f, app.getResourceUsageReport().getQueueUsagePercentage(),
0.01f);
assertEquals(60.0f,
app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
// Near-zero-capacity queue: queue percentage is reported as 0 instead of
// a huge or non-finite value; the cluster percentage is unaffected.
queue = createQueue("test3", null, Float.MIN_VALUE);
app = new SchedulerApplicationAttempt(appAttId, user, queue,
queue.getAbstractUsersManager(), rmContext);
// Resource request
app.attemptResourceUsage.incUsed(requestedResource);
assertEquals(0.0f, app.getResourceUsageReport().getQueueUsagePercentage(),
0.01f);
assertEquals(15.0f,
app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
}
/**
 * Verifies that when the cluster resource is zero (e.g. all nodes have been
 * decommissioned), both the queue and cluster usage percentages are reported
 * as 0 rather than a division-by-zero artifact.
 */
@Test
public void testAppPercentagesOnswitch() throws Exception {
FifoScheduler scheduler = mock(FifoScheduler.class);
// Empty cluster: 0 MB, 0 vcores.
when(scheduler.getClusterResource()).thenReturn(Resource.newInstance(0, 0));
when(scheduler.getResourceCalculator())
.thenReturn(new DefaultResourceCalculator());
ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
RMContext rmContext = mock(RMContext.class);
when(rmContext.getEpoch()).thenReturn(3L);
when(rmContext.getScheduler()).thenReturn(scheduler);
when(rmContext.getYarnConfiguration()).thenReturn(conf);
final String user = "user1";
Queue queue = createQueue("test", null);
SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(appAttId,
user, queue, queue.getAbstractUsersManager(), rmContext);
// Resource request
Resource requestedResource = Resource.newInstance(1536, 2);
app.attemptResourceUsage.incUsed(requestedResource);
// Even with non-zero usage, percentages against a zero cluster must be 0.
assertEquals(0.0f, app.getResourceUsageReport().getQueueUsagePercentage(),
0.0f);
assertEquals(0.0f, app.getResourceUsageReport().getClusterUsagePercentage(),
0.0f);
}
/**
 * Verifies that the resource usage report aggregates used and reserved
 * resources across all node-label partitions: usage recorded under labels
 * "X" and "Y" must both be included in the reported totals.
 */
@Test
public void testAllResourceUsage() throws Exception {
FifoScheduler scheduler = mock(FifoScheduler.class);
when(scheduler.getClusterResource()).thenReturn(Resource.newInstance(0, 0));
when(scheduler.getResourceCalculator())
.thenReturn(new DefaultResourceCalculator());
ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
RMContext rmContext = mock(RMContext.class);
when(rmContext.getEpoch()).thenReturn(3L);
when(rmContext.getScheduler()).thenReturn(scheduler);
when(rmContext.getYarnConfiguration()).thenReturn(conf);
final String user = "user1";
Queue queue = createQueue("test", null);
SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(appAttId,
user, queue, queue.getAbstractUsersManager(), rmContext);
// Resource request
Resource requestedResource = Resource.newInstance(1536, 2);
// Record (1536MB, 2 vcores) used under each of two partitions ...
app.attemptResourceUsage.incUsed("X", requestedResource);
app.attemptResourceUsage.incUsed("Y", requestedResource);
Resource r2 = Resource.newInstance(1024, 1);
// ... and (1024MB, 1 vcore) reserved under each of the same partitions.
app.attemptResourceUsage.incReserved("X", r2);
app.attemptResourceUsage.incReserved("Y", r2);
// Totals sum across partitions: used = 2 x (1536, 2) = (3072, 4),
// reserved = 2 x (1024, 1) = (2048, 2).
assertTrue(Resources.equals(Resource.newInstance(3072, 4),
app.getResourceUsageReport().getUsedResources()),
"getUsedResources expected " + Resource.newInstance(3072, 4) +
" but was " + app.getResourceUsageReport().getUsedResources());
assertTrue(Resources.equals(Resource.newInstance(2048, 2),
app.getResourceUsageReport().getReservedResources()),
"getReservedResources expected " + Resource.newInstance(2048, 2) +
" but was " +
app.getResourceUsageReport().getReservedResources());
}
/**
 * Verifies that the per-scheduler-key scheduling-opportunity counter
 * saturates at {@link Integer#MAX_VALUE} instead of overflowing and wrapping
 * to a negative value when incremented repeatedly.
 */
@Test
public void testSchedulingOpportunityOverflow() throws Exception {
ApplicationAttemptId attemptId = createAppAttemptId(0, 0);
Queue queue = createQueue("test", null);
RMContext rmContext = mock(RMContext.class);
when(rmContext.getEpoch()).thenReturn(3L);
when(rmContext.getYarnConfiguration()).thenReturn(conf);
SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(
attemptId, "user", queue, queue.getAbstractUsersManager(), rmContext);
Priority priority = Priority.newInstance(1);
SchedulerRequestKey schedulerKey = toSchedulerKey(priority);
// Counter starts at zero and increments normally.
assertEquals(0, app.getSchedulingOpportunities(schedulerKey));
app.addSchedulingOpportunity(schedulerKey);
assertEquals(1, app.getSchedulingOpportunities(schedulerKey));
// verify the count is capped at MAX_VALUE and does not overflow
app.setSchedulingOpportunities(schedulerKey, Integer.MAX_VALUE - 1);
assertEquals(Integer.MAX_VALUE - 1,
app.getSchedulingOpportunities(schedulerKey));
app.addSchedulingOpportunity(schedulerKey);
assertEquals(Integer.MAX_VALUE,
app.getSchedulingOpportunities(schedulerKey));
// A further increment must stay pinned at MAX_VALUE, not wrap negative.
app.addSchedulingOpportunity(schedulerKey);
assertEquals(Integer.MAX_VALUE,
app.getSchedulingOpportunities(schedulerKey));
}
/**
 * Verifies {@code hasPendingResourceRequest} for both scheduling modes:
 * with RESPECT_PARTITION_EXCLUSIVITY only the queried partition's own
 * pending requests count, while with IGNORE_PARTITION_EXCLUSIVITY any
 * pending request makes every partition report pending work.
 */
@Test
public void testHasPendingResourceRequest() throws Exception {
ApplicationAttemptId attemptId = createAppAttemptId(0, 0);
Queue queue = createQueue("test", null);
RMContext rmContext = mock(RMContext.class);
when(rmContext.getEpoch()).thenReturn(3L);
when(rmContext.getYarnConfiguration()).thenReturn(conf);
SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(
attemptId, "user", queue, queue.getAbstractUsersManager(), rmContext);
Priority priority = Priority.newInstance(1);
List<ResourceRequest> requests = new ArrayList<>(2);
Resource unit = Resource.newInstance(1L, 1);
// Add a request for a container with a node label
requests.add(ResourceRequest.newInstance(priority, ResourceRequest.ANY,
unit, 1, false, "label1"));
// Add a request for a container without a node label
requests.add(ResourceRequest.newInstance(priority, ResourceRequest.ANY,
unit, 1, false, ""));
// Add unique allocation IDs so that the requests aren't considered
// duplicates
requests.get(0).setAllocationRequestId(0L);
requests.get(1).setAllocationRequestId(1L);
app.updateResourceRequests(requests);
// Exclusive mode: pending only for the partitions that actually have
// requests ("" and "label1"), not for an unrelated "label2".
assertTrue(app.hasPendingResourceRequest("",
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY),
"Reported no pending resource requests for no label when " +
"resource requests for no label are pending (exclusive partitions)");
assertTrue(app.hasPendingResourceRequest("label1",
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY),
"Reported no pending resource requests for label with pending " +
"resource requests (exclusive partitions)");
assertFalse(app.hasPendingResourceRequest("label2",
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY),
"Reported pending resource requests for label with no pending " +
"resource requests (exclusive partitions)");
// Relaxed mode: every partition — including "label2" — reports pending
// work as long as any request is outstanding.
assertTrue(app.hasPendingResourceRequest("",
SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY),
"Reported no pending resource requests for no label when "
+ "resource requests for no label are pending (relaxed partitions)");
assertTrue(app.hasPendingResourceRequest("label1",
SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY),
"Reported no pending resource requests for label with pending "
+ "resource requests (relaxed partitions)");
assertTrue(app.hasPendingResourceRequest("label2",
SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY),
"Reported no pending resource requests for label with no "
+ "pending resource requests (relaxed partitions)");
}
}
|
TestSchedulerApplicationAttempt
|
java
|
junit-team__junit5
|
junit-platform-launcher/src/main/java/org/junit/platform/launcher/LauncherConstants.java
|
{
"start": 3792,
"end": 4413
}
|
class ____ (<em>FQCN</em>) of each registered
* listener. Any dot ({@code .}) in a pattern will match against a dot ({@code .})
* or a dollar sign ({@code $}) in a FQCN. Any asterisk ({@code *}) will match
* against one or more characters in a FQCN. All other characters in a pattern
* will be matched one-to-one against a FQCN.
*
* <h4>Examples</h4>
*
* <ul>
* <li>{@code *}: deactivates all listeners.
* <li>{@code org.junit.*}: deactivates every listener under the {@code org.junit}
* base package and any of its subpackages.
* <li>{@code *.MyListener}: deactivates every listener whose simple
|
name
|
java
|
apache__rocketmq
|
broker/src/test/java/org/apache/rocketmq/broker/offset/BroadcastOffsetManagerTest.java
|
{
"start": 1800,
"end": 7719
}
|
class ____ {
private final AtomicLong maxOffset = new AtomicLong(10L);
private final AtomicLong commitOffset = new AtomicLong(-1);
private final ConsumerOffsetManager consumerOffsetManager = mock(ConsumerOffsetManager.class);
private final ConsumerManager consumerManager = mock(ConsumerManager.class);
private final BrokerConfig brokerConfig = new BrokerConfig();
private final Set<String> onlineClientIdSet = new HashSet<>();
private BroadcastOffsetManager broadcastOffsetManager;
@Before
public void before() throws ConsumeQueueException {
brokerConfig.setEnableBroadcastOffsetStore(true);
brokerConfig.setBroadcastOffsetExpireSecond(1);
brokerConfig.setBroadcastOffsetExpireMaxSecond(5);
BrokerController brokerController = mock(BrokerController.class);
when(brokerController.getBrokerConfig()).thenReturn(brokerConfig);
when(brokerController.getConsumerManager()).thenReturn(consumerManager);
doAnswer((Answer<ClientChannelInfo>) mock -> {
String clientId = mock.getArgument(1);
if (onlineClientIdSet.contains(clientId)) {
return new ClientChannelInfo(null);
}
return null;
}).when(consumerManager).findChannel(anyString(), anyString());
doAnswer((Answer<Long>) mock -> commitOffset.get())
.when(consumerOffsetManager).queryOffset(anyString(), anyString(), anyInt());
doAnswer((Answer<Void>) mock -> {
commitOffset.set(mock.getArgument(4));
return null;
}).when(consumerOffsetManager).commitOffset(anyString(), anyString(), anyString(), anyInt(), anyLong());
when(brokerController.getConsumerOffsetManager()).thenReturn(consumerOffsetManager);
MessageStore messageStore = mock(MessageStore.class);
doAnswer((Answer<Long>) mock -> maxOffset.get())
.when(messageStore).getMaxOffsetInQueue(anyString(), anyInt(), anyBoolean());
when(brokerController.getMessageStore()).thenReturn(messageStore);
broadcastOffsetManager = new BroadcastOffsetManager(brokerController);
}
@Test
public void testBroadcastOffsetSwitch() throws ConsumeQueueException {
// client1 connect to broker
onlineClientIdSet.add("client1");
long offset = broadcastOffsetManager.queryInitOffset("group", "topic", 0, "client1", 0, false);
Assert.assertEquals(-1, offset);
broadcastOffsetManager.updateOffset("group", "topic", 0, 10, "client1", false);
offset = broadcastOffsetManager.queryInitOffset("group", "topic", 0, "client1", 11, false);
Assert.assertEquals(-1, offset);
broadcastOffsetManager.updateOffset("group", "topic", 0, 11, "client1", false);
// client1 connect to proxy
offset = broadcastOffsetManager.queryInitOffset("group", "topic", 0, "client1", -1, true);
Assert.assertEquals(11, offset);
broadcastOffsetManager.updateOffset("group", "topic", 0, 11, "client1", true);
offset = broadcastOffsetManager.queryInitOffset("group", "topic", 0, "client1", 11, true);
Assert.assertEquals(-1, offset);
broadcastOffsetManager.updateOffset("group", "topic", 0, 12, "client1", true);
broadcastOffsetManager.scanOffsetData();
Assert.assertEquals(12L, commitOffset.get());
// client2 connect to proxy
onlineClientIdSet.add("client2");
offset = broadcastOffsetManager.queryInitOffset("group", "topic", 0, "client2", -1, true);
Assert.assertEquals(12, offset);
broadcastOffsetManager.updateOffset("group", "topic", 0, 12, "client2", true);
offset = broadcastOffsetManager.queryInitOffset("group", "topic", 0, "client2", 11, true);
Assert.assertEquals(-1, offset);
broadcastOffsetManager.updateOffset("group", "topic", 0, 13, "client2", true);
broadcastOffsetManager.scanOffsetData();
Assert.assertEquals(12L, commitOffset.get());
// client1 connect to broker
offset = broadcastOffsetManager.queryInitOffset("group", "topic", 0, "client1", 20, false);
Assert.assertEquals(12, offset);
broadcastOffsetManager.updateOffset("group", "topic", 0, 12, "client1", false);
offset = broadcastOffsetManager.queryInitOffset("group", "topic", 0, "client1", 12, false);
Assert.assertEquals(-1, offset);
onlineClientIdSet.clear();
maxOffset.set(30L);
// client3 connect to broker
onlineClientIdSet.add("client3");
offset = broadcastOffsetManager.queryInitOffset("group", "topic", 0, "client3", 30, false);
Assert.assertEquals(-1, offset);
broadcastOffsetManager.updateOffset("group", "topic", 0, 30, "client3", false);
await().atMost(Duration.ofSeconds(brokerConfig.getBroadcastOffsetExpireSecond() + 1)).until(() -> {
broadcastOffsetManager.scanOffsetData();
return commitOffset.get() == 30L;
});
}
@Test
public void testBroadcastOffsetExpire() {
onlineClientIdSet.add("client1");
broadcastOffsetManager.updateOffset(
"group", "topic", 0, 10, "client1", false);
onlineClientIdSet.clear();
await().atMost(Duration.ofSeconds(brokerConfig.getBroadcastOffsetExpireSecond() + 1)).until(() -> {
broadcastOffsetManager.scanOffsetData();
return broadcastOffsetManager.offsetStoreMap.isEmpty();
});
onlineClientIdSet.add("client1");
broadcastOffsetManager.updateOffset(
"group", "topic", 0, 10, "client1", false);
await().atMost(Duration.ofSeconds(brokerConfig.getBroadcastOffsetExpireMaxSecond() + 1)).until(() -> {
broadcastOffsetManager.scanOffsetData();
return broadcastOffsetManager.offsetStoreMap.isEmpty();
});
}
}
|
BroadcastOffsetManagerTest
|
java
|
google__guice
|
core/src/com/google/inject/internal/InternalFlags.java
|
{
"start": 815,
"end": 1980
}
|
class ____ {
private static final Logger logger = Logger.getLogger(InternalFlags.class.getName());
private static final IncludeStackTraceOption INCLUDE_STACK_TRACES =
getSystemOption(
"guice_include_stack_traces",
IncludeStackTraceOption.ONLY_FOR_DECLARING_SOURCE);
private static final CustomClassLoadingOption CUSTOM_CLASS_LOADING =
getSystemOption(
"guice_custom_class_loading",
CustomClassLoadingOption.BRIDGE,
CustomClassLoadingOption.OFF);
private static final NullableProvidesOption NULLABLE_PROVIDES =
getSystemOption("guice_check_nullable_provides_params", NullableProvidesOption.ERROR);
private static final BytecodeGenOption BYTECODE_GEN_OPTION =
getSystemOption("guice_bytecode_gen_option", BytecodeGenOption.ENABLED);
private static final ColorizeOption COLORIZE_OPTION =
getSystemOption("guice_colorize_error_messages", ColorizeOption.OFF);
private static final UseMethodHandlesOption USE_METHOD_HANDLES =
getSystemOption("guice_use_method_handles", UseMethodHandlesOption.NO);
/** The options for using `MethodHandles`. */
public
|
InternalFlags
|
java
|
elastic__elasticsearch
|
test/framework/src/main/java/org/elasticsearch/test/ReadableMatchers.java
|
{
"start": 1848,
"end": 2574
}
|
class ____ extends TypeSafeMatcher<Long> {
private final long timeMillis;
public DateMillisMatcher(String date) {
this.timeMillis = Instant.parse(date).toEpochMilli();
}
@Override
public boolean matchesSafely(Long item) {
return timeMillis == item;
}
@Override
public void describeMismatchSafely(Long item, Description description) {
description.appendText("was ").appendValue(dateFormatter.formatMillis(item));
}
@Override
public void describeTo(Description description) {
description.appendText(dateFormatter.formatMillis(timeMillis));
}
}
public static
|
DateMillisMatcher
|
java
|
spring-projects__spring-framework
|
spring-tx/src/main/java/org/springframework/transaction/interceptor/MatchAlwaysTransactionAttributeSource.java
|
{
"start": 1548,
"end": 2952
}
|
class ____ implements TransactionAttributeSource, Serializable {
private TransactionAttribute transactionAttribute = new DefaultTransactionAttribute();
/**
* Allows a transaction attribute to be specified, using the String form, for
* example, "PROPAGATION_REQUIRED".
* @param transactionAttribute the String form of the transactionAttribute to use.
* @see org.springframework.transaction.interceptor.TransactionAttributeEditor
*/
public void setTransactionAttribute(TransactionAttribute transactionAttribute) {
if (transactionAttribute instanceof DefaultTransactionAttribute dta) {
dta.resolveAttributeStrings(null);
}
this.transactionAttribute = transactionAttribute;
}
@Override
public @Nullable TransactionAttribute getTransactionAttribute(Method method, @Nullable Class<?> targetClass) {
return (ClassUtils.isUserLevelMethod(method) ? this.transactionAttribute : null);
}
@Override
public boolean equals(@Nullable Object other) {
return (this == other || (other instanceof MatchAlwaysTransactionAttributeSource that &&
ObjectUtils.nullSafeEquals(this.transactionAttribute, that.transactionAttribute)));
}
@Override
public int hashCode() {
return MatchAlwaysTransactionAttributeSource.class.hashCode();
}
@Override
public String toString() {
return getClass().getName() + ": " + this.transactionAttribute;
}
}
|
MatchAlwaysTransactionAttributeSource
|
java
|
apache__camel
|
components/camel-tahu/src/main/java/org/apache/camel/component/tahu/TahuEdgeEndpoint.java
|
{
"start": 2150,
"end": 9209
}
|
class ____ extends TahuDefaultEndpoint implements HeaderFilterStrategyAware {
@UriPath(label = "producer", description = "ID of the group")
@Metadata(applicableFor = TahuConstants.EDGE_NODE_SCHEME, required = true)
private final String groupId;
@UriPath(label = "producer", description = "ID of the edge node")
@Metadata(applicableFor = TahuConstants.EDGE_NODE_SCHEME, required = true)
private final String edgeNode;
@UriPath(label = "producer (device only)", description = "ID of this edge node device")
@Metadata(applicableFor = TahuConstants.EDGE_NODE_SCHEME)
private final String deviceId;
@UriParam(label = "producer (edge node only)", description = "Host ID of the primary host application for this edge node")
@Metadata(applicableFor = TahuConstants.EDGE_NODE_SCHEME)
private String primaryHostId;
@UriParam(label = "producer (edge node only)",
description = "ID of each device connected to this edge node, as a comma-separated list")
@Metadata(applicableFor = TahuConstants.EDGE_NODE_SCHEME)
private String deviceIds;
@UriParam(label = "producer",
description = "Tahu SparkplugBPayloadMap to configure metric data types for this edge node or device. Note that this payload is used exclusively as a Sparkplug B spec-compliant configuration for all possible edge node or device metric names, aliases, and data types. This configuration is required to publish proper Sparkplug B NBIRTH and DBIRTH payloads.")
@Metadata(applicableFor = TahuConstants.EDGE_NODE_SCHEME, required = true)
private SparkplugBPayloadMap metricDataTypePayloadMap;
@UriParam(label = "producer (edge node only),advanced", description = "Flag enabling support for metric aliases",
defaultValue = "false")
@Metadata(applicableFor = TahuConstants.EDGE_NODE_SCHEME)
private boolean useAliases = false;
@UriParam(label = "producer,advanced",
description = "To use a custom HeaderFilterStrategy to filter headers used as Sparkplug metrics",
defaultValueNote = "Defaults to sending all Camel Message headers with name prefixes of \""
+ TahuConstants.METRIC_HEADER_PREFIX + "\", including those with null values")
@Metadata(applicableFor = TahuConstants.EDGE_NODE_SCHEME)
private volatile HeaderFilterStrategy headerFilterStrategy;
@UriParam(label = "producer (edge node only),advanced",
description = "To use a specific org.eclipse.tahu.message.BdSeqManager implementation to manage edge node birth-death sequence numbers",
defaultValue = "org.apache.camel.component.tahu.CamelBdSeqManager")
@Metadata(applicableFor = TahuConstants.EDGE_NODE_SCHEME)
private volatile BdSeqManager bdSeqManager;
@UriParam(label = "producer (edge node only),advanced",
description = "Path for Sparkplug B NBIRTH/NDEATH sequence number persistence files. This path will contain files named as \"<Edge Node ID>-bdSeqNum\" and must be writable by the executing process' user",
defaultValue = "${sys:java.io.tmpdir}/CamelTahuTemp")
@Metadata(applicableFor = TahuConstants.EDGE_NODE_SCHEME)
private String bdSeqNumPath;
private final EdgeNodeDescriptor edgeNodeDescriptor;
TahuEdgeEndpoint(String uri, TahuDefaultComponent component, TahuConfiguration configuration, String groupId,
String edgeNode, String deviceId) {
super(uri, component, configuration);
this.groupId = ObjectHelper.notNullOrEmpty(groupId, "groupId");
this.edgeNode = ObjectHelper.notNullOrEmpty(edgeNode, "edgeNode");
// Device ID can only be null or non-empty
this.deviceId = (deviceId != null && deviceId.length() == 0) ? null : deviceId;
if (ObjectHelper.isNotEmpty(deviceId)) {
edgeNodeDescriptor = new DeviceDescriptor(groupId, edgeNode, deviceId);
} else {
edgeNodeDescriptor = new EdgeNodeDescriptor(groupId, edgeNode);
}
}
@Override
public Producer createProducer() throws Exception {
TahuEdgeProducer.Builder producerBuilder = new TahuEdgeProducer.Builder(this)
.groupId(ObjectHelper.notNullOrEmpty(groupId, "groupId"))
.edgeNode(ObjectHelper.notNullOrEmpty(edgeNode, "edgeNode"));
ObjectHelper.ifNotEmpty(deviceId, producerBuilder::deviceId);
return producerBuilder.build();
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
throw new UnsupportedOperationException("Cannot consume from this endpoint");
}
public EdgeNodeDescriptor getEdgeNodeDescriptor() {
return edgeNodeDescriptor;
}
public String getGroupId() {
return groupId;
}
public String getEdgeNode() {
return edgeNode;
}
public String getDeviceId() {
return deviceId;
}
public String getPrimaryHostId() {
return primaryHostId;
}
public void setPrimaryHostId(String primaryHostId) {
this.primaryHostId = primaryHostId;
}
public String getDeviceIds() {
return deviceIds;
}
public void setDeviceIds(String deviceIds) {
this.deviceIds = deviceIds;
}
public List<String> getDeviceIdList() {
return Arrays.asList(deviceIds.split(","));
}
public SparkplugBPayloadMap getMetricDataTypePayloadMap() {
return metricDataTypePayloadMap;
}
public void setMetricDataTypePayloadMap(SparkplugBPayloadMap metricDataTypePayloadMap) {
this.metricDataTypePayloadMap = metricDataTypePayloadMap;
}
public boolean isUseAliases() {
return useAliases;
}
public void setUseAliases(boolean useAliases) {
this.useAliases = useAliases;
}
public BdSeqManager getBdSeqManager() {
return bdSeqManager;
}
public void setBdSeqManager(BdSeqManager bdSeqManager) {
this.bdSeqManager = bdSeqManager;
}
public String getBdSeqNumPath() {
return bdSeqNumPath;
}
public void setBdSeqNumPath(String bdSeqNumPath) {
this.bdSeqNumPath = bdSeqNumPath;
}
@Override
public HeaderFilterStrategy getHeaderFilterStrategy() {
HeaderFilterStrategy existingStrategy = this.headerFilterStrategy;
if (existingStrategy == null) {
DefaultHeaderFilterStrategy strategy = new DefaultHeaderFilterStrategy();
this.headerFilterStrategy = existingStrategy = strategy;
strategy.setFilterOnMatch(false);
strategy.setOutFilter((String) null);
strategy.setOutFilterPattern((String) null);
strategy.setOutFilterStartsWith(TahuConstants.METRIC_HEADER_PREFIX);
strategy.setAllowNullValues(true);
}
return existingStrategy;
}
@Override
public void setHeaderFilterStrategy(HeaderFilterStrategy strategy) {
this.headerFilterStrategy = strategy;
}
}
|
TahuEdgeEndpoint
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ser/filter/IncludePropsForSerTest.java
|
{
"start": 1007,
"end": 1134
}
|
class ____
{
@JsonIncludeProperties({"y"})
public XY value = new XY();
}
static
|
WrapperWithPropInclude
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/stages/CreateOutputDirectoriesStage.java
|
{
"start": 14944,
"end": 15154
}
|
enum ____ {
dirFoundInStore,
dirFoundInMap,
dirWasCreated,
dirCreatedOnSecondAttempt,
fileNowDeleted,
ancestorWasDirOrMissing,
parentWasNotFile,
parentOfCreatedDir
}
}
|
DirMapState
|
java
|
bumptech__glide
|
library/src/main/java/com/bumptech/glide/load/model/ResourceLoader.java
|
{
"start": 3166,
"end": 3399
}
|
class ____ unused by Glide. {@link AssetFileDescriptorFactory} should be
* preferred because it's not possible to reliably load a simple {@link
* java.io.FileDescriptor} for resources.
*/
@Deprecated
public static
|
is
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/FutureReturnValueIgnoredTest.java
|
{
"start": 9124,
"end": 9231
}
|
interface ____ {
@CanIgnoreReturnValue
Future<Object> getFuture();
}
public static
|
CanIgnoreMethod
|
java
|
apache__camel
|
components/camel-test/camel-test-main-junit5/src/test/java/org/apache/camel/test/main/junit5/legacy/AdviceRouteTest.java
|
{
"start": 1297,
"end": 1553
}
|
class ____ extends CamelMainTestSupport {
@Override
public boolean isUseAdviceWith() {
return true;
}
@Override
protected void configure(MainConfigurationProperties configuration) {
// Add the configuration
|
AdviceRouteTest
|
java
|
netty__netty
|
codec-native-quic/src/test/java/io/netty/handler/codec/quic/QuicChannelConnectTest.java
|
{
"start": 82431,
"end": 83100
}
|
class ____ extends ChannelInboundHandlerAdapter {
private final CountDownLatch latch;
private final int numBytes;
private int bytes;
BytesCountingHandler(CountDownLatch latch, int numBytes) {
this.latch = latch;
this.numBytes = numBytes;
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
ByteBuf buffer = (ByteBuf) msg;
bytes += buffer.readableBytes();
ctx.writeAndFlush(buffer);
if (bytes == numBytes) {
latch.countDown();
}
}
}
private static final
|
BytesCountingHandler
|
java
|
spring-projects__spring-framework
|
framework-docs/src/main/java/org/springframework/docs/integration/jms/jmssendingconversion/JmsSenderWithConversion.java
|
{
"start": 921,
"end": 1401
}
|
class ____ {
private JmsTemplate jmsTemplate;
public void sendWithConversion() {
Map<String, Object> map = new HashMap<>();
map.put("Name", "Mark");
map.put("Age", 47);
jmsTemplate.convertAndSend("testQueue", map, new MessagePostProcessor() {
public Message postProcessMessage(Message message) throws JMSException {
message.setIntProperty("AccountID", 1234);
message.setJMSCorrelationID("123-00001");
return message;
}
});
}
}
|
JmsSenderWithConversion
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/custom/CustomRequestManagerTests.java
|
{
"start": 1353,
"end": 3263
}
|
class ____ extends ESTestCase {
private ThreadPool threadPool;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
threadPool = createThreadPool(inferenceUtilityExecutors());
}
@After
@Override
public void tearDown() throws Exception {
super.tearDown();
terminate(threadPool);
}
public void testCreateRequest_ThrowsException_ForInvalidUrl() {
var inferenceId = "inference_id";
var requestContentString = """
{
"input": ${input}
}
""";
var serviceSettings = new CustomServiceSettings(
CustomServiceSettings.TextEmbeddingSettings.NON_TEXT_EMBEDDING_TASK_TYPE_SETTINGS,
"${url}",
null,
null,
requestContentString,
new RerankResponseParser("$.result.score"),
new RateLimitSettings(10_000)
);
var model = CustomModelTests.createModel(
inferenceId,
TaskType.RERANK,
serviceSettings,
new CustomTaskSettings(Map.of("url", "^")),
new CustomSecretSettings(Map.of("api_key", new SecureString("my-secret-key".toCharArray())))
);
var listener = new PlainActionFuture<InferenceServiceResults>();
var manager = CustomRequestManager.of(model, threadPool);
manager.execute(new EmbeddingsInput(List.of("abc", "123"), null), mock(RequestSender.class), () -> false, listener);
var exception = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TimeValue.timeValueSeconds(30)));
assertThat(exception.getMessage(), is("Failed to construct the custom service request"));
assertThat(exception.getCause().getMessage(), startsWith("Failed to build URI, error: Illegal character in path"));
}
}
|
CustomRequestManagerTests
|
java
|
elastic__elasticsearch
|
modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java
|
{
"start": 71324,
"end": 72899
}
|
class ____ extends NoncondexpressionContext {
public List<NoncondexpressionContext> noncondexpression() {
return getRuleContexts(NoncondexpressionContext.class);
}
public NoncondexpressionContext noncondexpression(int i) {
return getRuleContext(NoncondexpressionContext.class, i);
}
public TerminalNode LT() {
return getToken(PainlessParser.LT, 0);
}
public TerminalNode LTE() {
return getToken(PainlessParser.LTE, 0);
}
public TerminalNode GT() {
return getToken(PainlessParser.GT, 0);
}
public TerminalNode GTE() {
return getToken(PainlessParser.GTE, 0);
}
public TerminalNode EQ() {
return getToken(PainlessParser.EQ, 0);
}
public TerminalNode EQR() {
return getToken(PainlessParser.EQR, 0);
}
public TerminalNode NE() {
return getToken(PainlessParser.NE, 0);
}
public TerminalNode NER() {
return getToken(PainlessParser.NER, 0);
}
public CompContext(NoncondexpressionContext ctx) {
copyFrom(ctx);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if (visitor instanceof PainlessParserVisitor) return ((PainlessParserVisitor<? extends T>) visitor).visitComp(this);
else return visitor.visitChildren(this);
}
}
@SuppressWarnings("CheckReturnValue")
public static
|
CompContext
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/customwordembedding/RelevantScriptFeatureExtractor.java
|
{
"start": 867,
"end": 2459
}
|
class ____ implements FeatureExtractor {
@Override
public FeatureValue[] extractFeatures(String text) {
if (text.isEmpty()) {
return new FeatureValue[0];
}
// counts[s] is the number of characters with script s.
// Use treemap so results are sorted in scriptid order
final Counter totalCount = Counter.newCounter();
TreeMap<ScriptDetector.Script, Counter> counts = new TreeMap<>();
text.codePoints().forEach(cp -> {
// Get anything that is a letter, or anything complex enough warranting a check (more than one UTF-8 byte).
// cp > Byte.MAX_VALUE works as the first 127 codepoints are the same as the ASCII encoding,
// which is the same as one UTF-8 byte.
if (Character.isLetter(cp) || cp > Byte.MAX_VALUE) {
ScriptDetector.Script script = ScriptDetector.Script.fromCodePoint(cp);
counts.computeIfAbsent(script, (s) -> Counter.newCounter()).addAndGet(1);
totalCount.addAndGet(1L);
}
});
FeatureValue[] result = new FeatureValue[counts.size()];
int index = 0;
for (Map.Entry<ScriptDetector.Script, Counter> entry : counts.entrySet()) {
ScriptDetector.Script scriptId = entry.getKey();
long count = entry.getValue().get();
double weight = (double) count / (double) totalCount.get();
result[index++] = new ContinuousFeatureValue(scriptId.toInt(), weight);
}
return result;
}
}
|
RelevantScriptFeatureExtractor
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractAnalyticsStreamVectoredRead.java
|
{
"start": 3224,
"end": 8408
}
|
class ____ extends AbstractContractVectoredReadTest {
public ITestS3AContractAnalyticsStreamVectoredRead(String bufferType) {
super(bufferType);
}
private static final String REQUEST_COALESCE_TOLERANCE_KEY =
ANALYTICS_ACCELERATOR_CONFIGURATION_PREFIX + "." + AAL_REQUEST_COALESCE_TOLERANCE;
private static final String READ_BUFFER_SIZE_KEY =
ANALYTICS_ACCELERATOR_CONFIGURATION_PREFIX + "." + AAL_READ_BUFFER_SIZE;
private static final String SMALL_OBJECT_PREFETCH_ENABLED_KEY =
ANALYTICS_ACCELERATOR_CONFIGURATION_PREFIX + "." + AAL_SMALL_OBJECT_PREFETCH_ENABLED;
private static final String CACHE_TIMEOUT_KEY =
ANALYTICS_ACCELERATOR_CONFIGURATION_PREFIX + "." + AAL_CACHE_TIMEOUT;
/**
* Create a configuration.
* @return a configuration
*/
@Override
protected Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
S3ATestUtils.disableFilesystemCaching(conf);
removeBaseAndBucketOverrides(conf,
REQUEST_COALESCE_TOLERANCE_KEY,
READ_BUFFER_SIZE_KEY,
SMALL_OBJECT_PREFETCH_ENABLED_KEY,
CACHE_TIMEOUT_KEY);
// Set the coalesce tolerance to 1KB, default is 1MB.
conf.setInt(REQUEST_COALESCE_TOLERANCE_KEY, S_16K);
// Set the minimum block size to 32KB. AAL uses a default block size of 128KB, which means the minimum size a S3
// request will be is 128KB. Since the file being read is 128KB, we need to use this here to demonstrate that
// separate GET requests are made for ranges that are not coalesced.
conf.setInt(READ_BUFFER_SIZE_KEY, S_32K);
// Disable small object prefetched, otherwise anything less than 8MB is fetched in a single GET.
conf.set(SMALL_OBJECT_PREFETCH_ENABLED_KEY, "false");
conf.setInt(CACHE_TIMEOUT_KEY, 5000);
enableAnalyticsAccelerator(conf);
// If encryption is set, some AAL tests will fail.
// This is because AAL caches the head request response, and uses
// the eTag when making a GET request. When using encryption, the eTag is
// no longer a hash of the object content, and is not always the same when
// the same object is created multiple times. This test creates the file
// vectored_file.txt before running each test, which will have a
// different eTag when using encryption, leading to preconditioned failures.
// This issue is tracked in:
// https://github.com/awslabs/analytics-accelerator-s3/issues/218
skipForAnyEncryptionExceptSSES3(conf);
return conf;
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
/**
* When the offset is negative, AAL returns IllegalArgumentException, whereas the base implementation will return
* an EoF.
*/
@Override
public void testNegativeOffsetRange() throws Exception {
verifyExceptionalVectoredRead(ContractTestUtils.range(-1, 50), IllegalArgumentException.class);
}
/**
* Currently there is no null check on the release operation, this will be fixed in the next AAL version.
*/
@Override
public void testNullReleaseOperation() {
skip("AAL current does not do a null check on the release operation");
}
@Test
public void testReadVectoredWithAALStatsCollection() throws Exception {
List<FileRange> fileRanges = new ArrayList<>();
fileRanges.add(FileRange.createFileRange(0, 100));
fileRanges.add(FileRange.createFileRange(800, 200));
fileRanges.add(FileRange.createFileRange(4 * S_1K, 4 * S_1K));
fileRanges.add(FileRange.createFileRange(80 * S_1K, 4 * S_1K));
try (FSDataInputStream in = openVectorFile()) {
in.readVectored(fileRanges, getAllocate());
validateVectoredReadResult(fileRanges, DATASET, 0);
IOStatistics st = in.getIOStatistics();
verifyStatisticCounterValue(st,
StreamStatisticNames.STREAM_READ_ANALYTICS_OPENED, 1);
verifyStatisticCounterValue(st,
StreamStatisticNames.STREAM_READ_VECTORED_OPERATIONS,
1);
// Verify ranges are coalesced, we are using a coalescing tolerance of 16KB, so [0-100, 800-200, 4KB-8KB] will
// get coalesced into a single range.
verifyStatisticCounterValue(st, StreamStatisticNames.STREAM_READ_VECTORED_INCOMING_RANGES, 4);
verifyStatisticCounterValue(st, StreamStatisticNames.STREAM_READ_VECTORED_COMBINED_RANGES, 2);
verifyStatisticCounterValue(st, ACTION_HTTP_GET_REQUEST, 2);
// read the same ranges again to demonstrate that the data is cached, and no new GETs are made.
in.readVectored(fileRanges, getAllocate());
verifyStatisticCounterValue(st, ACTION_HTTP_GET_REQUEST, 2);
// Because of how AAL is currently written, it is not possible to track cache hits that originate from a
// readVectored() accurately. For this reason, cache hits from readVectored are currently not tracked, for more
// details see: https://github.com/awslabs/analytics-accelerator-s3/issues/359
verifyStatisticCounterValue(st, StreamStatisticNames.STREAM_READ_CACHE_HIT, 0);
}
}
}
|
ITestS3AContractAnalyticsStreamVectoredRead
|
java
|
google__guice
|
core/src/com/google/inject/internal/Annotations.java
|
{
"start": 16175,
"end": 17198
}
|
class ____.
*/
public static Class<? extends Annotation> canonicalizeIfNamed(
Class<? extends Annotation> annotationType) {
if (
annotationType == jakarta.inject.Named.class) {
return Named.class;
}
return annotationType;
}
/**
* Returns the name the binding should use. This is based on the annotation. If the annotation has
* an instance and is not a marker annotation, we ask the annotation for its toString. If it was a
* marker annotation or just an annotation type, we use the annotation's name. Otherwise, the name
* is the empty string.
*/
public static String nameOf(Key<?> key) {
Annotation annotation = key.getAnnotation();
Class<? extends Annotation> annotationType = key.getAnnotationType();
if (annotation != null && !isMarker(annotationType)) {
return key.getAnnotation().toString();
} else if (key.getAnnotationType() != null) {
return '@' + key.getAnnotationType().getName();
} else {
return "";
}
}
}
|
otherwise
|
java
|
quarkusio__quarkus
|
extensions/devui/deployment/src/main/java/io/quarkus/devui/deployment/InternalImportMapBuildItem.java
|
{
"start": 209,
"end": 663
}
|
class ____ extends MultiBuildItem {
private final Map<String, String> importMap = new HashMap<>();
public InternalImportMapBuildItem() {
}
public void add(Map<String, String> importMap) {
this.importMap.putAll(importMap);
}
public void add(String key, String path) {
this.importMap.put(key, path);
}
public Map<String, String> getImportMap() {
return importMap;
}
}
|
InternalImportMapBuildItem
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/client/protocol/pubsub/Message.java
|
{
"start": 746,
"end": 804
}
|
interface ____ {
ChannelName getChannel();
}
|
Message
|
java
|
google__auto
|
value/src/it/functional/src/test/java/com/google/auto/value/AutoBuilderKotlinTest.java
|
{
"start": 11378,
"end": 12520
}
|
interface ____ {
static KotlinDataWithListBuilder builder() {
return new AutoBuilder_AutoBuilderKotlinTest_KotlinDataWithListBuilder();
}
static KotlinDataWithListBuilder builder(KotlinDataWithList kotlinData) {
return new AutoBuilder_AutoBuilderKotlinTest_KotlinDataWithListBuilder(kotlinData);
}
KotlinDataWithListBuilder list(List<? extends CharSequence> list);
KotlinDataWithListBuilder number(int number);
KotlinDataWithList build();
}
// The `getList()` method returns `List<CharSequence>` as seen from Java, but the `list` parameter
// to the constructor has type `List<? extends CharSequence>`.
@Test
public void kotlinWildcards() {
List<String> strings = ImmutableList.of("foo");
KotlinDataWithList x = KotlinDataWithListBuilder.builder().list(strings).number(17).build();
assertThat(x.getList()).isEqualTo(strings);
assertThat(x.getNumber()).isEqualTo(17);
KotlinDataWithList y = KotlinDataWithListBuilder.builder(x).number(23).build();
assertThat(y.getList()).isEqualTo(strings);
assertThat(y.getNumber()).isEqualTo(23);
}
}
|
KotlinDataWithListBuilder
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/proxy/HttpProxyDevServicesDeclarativeClientTest.java
|
{
"start": 2504,
"end": 2625
}
|
interface ____ {
@Path("count")
@GET
long count();
}
@Path("test")
public static
|
Client
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MigrateAction.java
|
{
"start": 1281,
"end": 5743
}
|
class ____ implements LifecycleAction {
public static final String NAME = "migrate";
public static final ParseField ENABLED_FIELD = new ParseField("enabled");
public static final MigrateAction ENABLED = new MigrateAction(true);
public static final MigrateAction DISABLED = new MigrateAction(false);
private static final Logger logger = LogManager.getLogger(MigrateAction.class);
public static final String CONDITIONAL_SKIP_MIGRATE_STEP = BranchingStep.NAME + "-check-skip-action";
private static final ConstructingObjectParser<MigrateAction, Void> PARSER = new ConstructingObjectParser<>(
NAME,
a -> a[0] == null || (boolean) a[0] ? ENABLED : DISABLED
);
static {
PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ENABLED_FIELD);
}
private final boolean enabled;
public static MigrateAction parse(XContentParser parser) {
return PARSER.apply(parser, null);
}
private MigrateAction(boolean enabled) {
this.enabled = enabled;
}
public static MigrateAction readFrom(StreamInput in) throws IOException {
return in.readBoolean() ? ENABLED : DISABLED;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(enabled);
}
@Override
public String getWriteableName() {
return NAME;
}
public boolean isEnabled() {
return enabled;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(ENABLED_FIELD.getPreferredName(), enabled);
builder.endObject();
return builder;
}
@Override
public boolean isSafeAction() {
return true;
}
@Override
public List<Step> toSteps(Client client, String phase, StepKey nextStepKey) {
if (enabled) {
StepKey preMigrateBranchingKey = new StepKey(phase, NAME, CONDITIONAL_SKIP_MIGRATE_STEP);
StepKey migrationKey = new StepKey(phase, NAME, NAME);
StepKey migrationRoutedKey = new StepKey(phase, NAME, DataTierMigrationRoutedStep.NAME);
String targetTier = "data_" + phase;
assert DataTier.validTierName(targetTier) : "invalid data tier name:" + targetTier;
BranchingStep conditionalSkipActionStep = new BranchingStep(
preMigrateBranchingKey,
migrationKey,
nextStepKey,
(index, project) -> {
IndexMetadata indexMetadata = project.index(index);
// partially mounted indices will already have data_frozen, and we don't want to change that if they do
if (indexMetadata.isPartialSearchableSnapshot()) {
String policyName = indexMetadata.getLifecyclePolicyName();
logger.debug(
"[{}] action in policy [{}] is configured for index [{}] which is a partially mounted index. "
+ "skipping this action",
MigrateAction.NAME,
policyName,
index.getName()
);
return true;
}
return false;
}
);
UpdateSettingsStep updateMigrationSettingStep = new UpdateSettingsStep(
migrationKey,
migrationRoutedKey,
client,
getPreferredTiersConfigurationSettings(targetTier)
);
DataTierMigrationRoutedStep migrationRoutedStep = new DataTierMigrationRoutedStep(migrationRoutedKey, nextStepKey);
return List.of(conditionalSkipActionStep, updateMigrationSettingStep, migrationRoutedStep);
} else {
return List.of();
}
}
@Override
public int hashCode() {
return Objects.hash(enabled);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (obj.getClass() != getClass()) {
return false;
}
MigrateAction other = (MigrateAction) obj;
return Objects.equals(enabled, other.enabled);
}
@Override
public String toString() {
return Strings.toString(this);
}
}
|
MigrateAction
|
java
|
spring-projects__spring-framework
|
spring-tx/src/main/java/org/springframework/transaction/annotation/AnnotationTransactionAttributeSource.java
|
{
"start": 1912,
"end": 2502
}
|
class ____ a custom TransactionAttributeSource,
* or get customized through {@link TransactionAnnotationParser} strategies.
*
* @author Colin Sampaleanu
* @author Juergen Hoeller
* @since 1.2
* @see Transactional
* @see TransactionAnnotationParser
* @see SpringTransactionAnnotationParser
* @see Ejb3TransactionAnnotationParser
* @see org.springframework.transaction.interceptor.TransactionInterceptor#setTransactionAttributeSource
* @see org.springframework.transaction.interceptor.TransactionProxyFactoryBean#setTransactionAttributeSource
*/
@SuppressWarnings("serial")
public
|
for
|
java
|
apache__maven
|
impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/mvn/MavenParser.java
|
{
"start": 1257,
"end": 4541
}
|
class ____ extends BaseParser {
@Override
protected Options parseCliOptions(LocalContext context) {
ArrayList<MavenOptions> result = new ArrayList<>();
// CLI args
MavenOptions cliOptions = parseMavenCliOptions(context.parserRequest.args());
result.add(cliOptions);
// atFile option
if (cliOptions.atFile().isPresent()) {
Path file = context.cwd.resolve(cliOptions.atFile().orElseThrow());
if (Files.isRegularFile(file)) {
result.add(parseMavenAtFileOptions(file));
} else {
throw new IllegalArgumentException("Specified file does not exists (" + file + ")");
}
}
// maven.config; if exists
Path mavenConfig = context.rootDirectory != null ? context.rootDirectory.resolve(".mvn/maven.config") : null;
if (mavenConfig != null && Files.isRegularFile(mavenConfig)) {
result.add(parseMavenConfigOptions(mavenConfig));
}
return LayeredMavenOptions.layerMavenOptions(result);
}
protected MavenOptions parseMavenCliOptions(List<String> args) {
try {
return parseArgs(Options.SOURCE_CLI, args);
} catch (ParseException e) {
throw new IllegalArgumentException("Failed to parse CLI arguments: " + e.getMessage(), e.getCause());
}
}
protected MavenOptions parseMavenAtFileOptions(Path atFile) {
try (Stream<String> lines = Files.lines(atFile, StandardCharsets.UTF_8)) {
List<String> args =
lines.filter(arg -> !arg.isEmpty() && !arg.startsWith("#")).toList();
return parseArgs("atFile", args);
} catch (ParseException e) {
throw new IllegalArgumentException(
"Failed to parse arguments from file (" + atFile + "): " + e.getMessage(), e.getCause());
} catch (IOException e) {
throw new IllegalStateException("Error reading config file: " + atFile, e);
}
}
protected MavenOptions parseMavenConfigOptions(Path configFile) {
try (Stream<String> lines = Files.lines(configFile, StandardCharsets.UTF_8)) {
List<String> args =
lines.filter(arg -> !arg.isEmpty() && !arg.startsWith("#")).toList();
MavenOptions options = parseArgs("maven.config", args);
if (options.goals().isPresent()) {
// This file can only contain options, not args (goals or phases)
throw new IllegalArgumentException("Unrecognized entries in maven.config (" + configFile + ") file: "
+ options.goals().get());
}
return options;
} catch (ParseException e) {
throw new IllegalArgumentException(
"Failed to parse arguments from maven.config file (" + configFile + "): " + e.getMessage(),
e.getCause());
} catch (IOException e) {
throw new IllegalStateException("Error reading config file: " + configFile, e);
}
}
protected MavenOptions parseArgs(String source, List<String> args) throws ParseException {
return CommonsCliMavenOptions.parse(source, args.toArray(new String[0]));
}
}
|
MavenParser
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.