language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/util/NameTransformerTest.java | {
"start": 201,
"end": 867
} | class ____ extends DatabindTestUtil
{
@Test
public void testSimpleTransformer() throws Exception
{
NameTransformer xfer;
xfer = NameTransformer.simpleTransformer("a", null);
assertEquals("aFoo", xfer.transform("Foo"));
assertEquals("Foo", xfer.reverse("aFoo"));
xfer = NameTransformer.simpleTransformer(null, "++");
assertEquals("foo++", xfer.transform("foo"));
assertEquals("foo", xfer.reverse("foo++"));
xfer = NameTransformer.simpleTransformer("(", ")");
assertEquals("(foo)", xfer.transform("foo"));
assertEquals("foo", xfer.reverse("(foo)"));
}
}
| NameTransformerTest |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/runtime/datasource/ReactiveTransactionalBitMapCommandsImpl.java | {
"start": 352,
"end": 3529
} | class ____<K> extends AbstractTransactionalCommands
implements ReactiveTransactionalBitMapCommands<K> {
private final ReactiveBitMapCommandsImpl<K> reactive;
public ReactiveTransactionalBitMapCommandsImpl(ReactiveTransactionalRedisDataSource ds,
ReactiveBitMapCommandsImpl<K> reactive, TransactionHolder tx) {
super(ds, tx);
this.reactive = reactive;
}
@Override
public Uni<Void> bitcount(K key) {
this.tx.enqueue(Response::toLong);
return this.reactive._bitcount(key).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> bitcount(K key, long start, long end) {
this.tx.enqueue(Response::toLong);
return this.reactive._bitcount(key, start, end).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> getbit(K key, long offset) {
this.tx.enqueue(Response::toInteger);
return this.reactive._getbit(key, offset).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> bitfield(K key, BitFieldArgs bitFieldArgs) {
this.tx.enqueue(this.reactive::decodeListOfLongs);
return this.reactive._bitfield(key, bitFieldArgs).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> bitpos(K key, int valueToLookFor) {
this.tx.enqueue(Response::toLong);
return this.reactive._bitpos(key, valueToLookFor).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> bitpos(K key, int bit, long start) {
this.tx.enqueue(Response::toLong);
return this.reactive._bitpos(key, bit, start).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> bitpos(K key, int bit, long start, long end) {
this.tx.enqueue(Response::toLong);
return this.reactive._bitpos(key, bit, start, end).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> bitopAnd(K destination, K... keys) {
this.tx.enqueue(Response::toLong);
return this.reactive._bitopAnd(destination, keys).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> bitopNot(K destination, K source) {
this.tx.enqueue(Response::toLong);
return this.reactive._bitopNot(destination, source).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> bitopOr(K destination, K... keys) {
this.tx.enqueue(Response::toLong);
return this.reactive._bitopOr(destination, keys).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> bitopXor(K destination, K... keys) {
this.tx.enqueue(Response::toLong);
return this.reactive._bitopXor(destination, keys).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> setbit(K key, long offset, int value) {
this.tx.enqueue(Response::toInteger);
return this.reactive._setbit(key, offset, value).invoke(this::queuedOrDiscard).replaceWithVoid();
}
}
| ReactiveTransactionalBitMapCommandsImpl |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/files/ManifestSuccessData.java | {
"start": 3039,
"end": 12889
} | class ____
extends AbstractManifestData<ManifestSuccessData> {
private static final Logger LOG =
LoggerFactory.getLogger(ManifestSuccessData.class);
/**
* Supported version value: {@value}.
* If this is changed the value of {@link #serialVersionUID} will change,
* to avoid deserialization problems.
*/
public static final int VERSION = 1;
/**
* Serialization ID: {@value}.
*/
private static final long serialVersionUID = 4755993198698104084L + VERSION;
/**
* Name to include in persisted data, so as to differentiate from
* any other manifests: {@value}.
*/
public static final String NAME
= "org.apache.hadoop.fs.s3a.commit.files.SuccessData/" + VERSION;
/**
* Name of file; includes version marker.
*/
private String name;
/** Timestamp of creation. */
private long timestamp;
/**
* Did this succeed?
* It is implicitly true in a _SUCCESS file, but if the file
* is also saved to a log dir, then it depends on the outcome
*/
private boolean success = true;
/** Timestamp as date string; no expectation of parseability. */
private String date;
/**
* Host which created the file (implicitly: committed the work).
*/
private String hostname;
/**
* Committer name.
*/
private String committer;
/**
* Description text.
*/
private String description;
/** Job ID, if known. */
private String jobId = "";
/**
* Source of the job ID.
*/
private String jobIdSource = "";
/**
* Metrics.
* Uses a treemap for serialization.
*/
private TreeMap<String, Long> metrics = new TreeMap<>();
/**
* Diagnostics information.
* Uses a treemap for serialization.
*/
private TreeMap<String, String> diagnostics = new TreeMap<>();
/**
* Filenames in the commit.
*/
private ArrayList<String> filenames = new ArrayList<>(0);
/**
* IOStatistics.
*/
@JsonProperty("iostatistics")
private IOStatisticsSnapshot iostatistics = new IOStatisticsSnapshot();
/**
* State (committed, aborted).
*/
private String state;
/**
* Stage: last stage executed.
*/
private String stage;
@Override
public ManifestSuccessData validate() throws IOException {
verify(name != null,
"Incompatible file format: no 'name' field");
verify(NAME.equals(name),
"Incompatible file format: " + name);
return this;
}
@Override
public JsonSerialization<ManifestSuccessData> createSerializer() {
return serializer();
}
@Override
public byte[] toBytes() throws IOException {
return serializer().toBytes(this);
}
/**
* To JSON.
* @return json string value.
* @throws IOException failure
*/
public String toJson() throws IOException {
return serializer().toJson(this);
}
@Override
public void save(FileSystem fs, Path path, boolean overwrite)
throws IOException {
// always set the name field before being saved.
name = NAME;
serializer().save(fs, path, this, overwrite);
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(
"ManifestSuccessData{");
sb.append("committer='").append(committer).append('\'');
sb.append(", hostname='").append(hostname).append('\'');
sb.append(", description='").append(description).append('\'');
sb.append(", date='").append(date).append('\'');
sb.append(", filenames=[").append(
StringUtils.join(filenames, ", "))
.append("]");
sb.append('}');
return sb.toString();
}
/**
* Dump the metrics (if any) to a string.
* The metrics are sorted for ease of viewing.
* @param prefix prefix before every entry
* @param middle string between key and value
* @param suffix suffix to each entry
* @return the dumped string
*/
public String dumpMetrics(String prefix, String middle, String suffix) {
return joinMap(metrics, prefix, middle, suffix);
}
/**
* Dump the diagnostics (if any) to a string.
* @param prefix prefix before every entry
* @param middle string between key and value
* @param suffix suffix to each entry
* @return the dumped string
*/
public String dumpDiagnostics(String prefix, String middle, String suffix) {
return joinMap(diagnostics, prefix, middle, suffix);
}
/**
* Join any map of string to value into a string, sorting the keys first.
* @param map map to join
* @param prefix prefix before every entry
* @param middle string between key and value
* @param suffix suffix to each entry
* @return a string for reporting.
*/
protected static String joinMap(Map<String, ?> map,
String prefix,
String middle, String suffix) {
if (map == null) {
return "";
}
List<String> list = new ArrayList<>(map.keySet());
Collections.sort(list);
StringBuilder sb = new StringBuilder(list.size() * 32);
for (String k : list) {
sb.append(prefix)
.append(k)
.append(middle)
.append(map.get(k))
.append(suffix);
}
return sb.toString();
}
/**
* Load an instance from a file, then validate it.
* @param fs filesystem
* @param path path
* @return the loaded instance
* @throws IOException IO failure
*/
public static ManifestSuccessData load(FileSystem fs, Path path)
throws IOException {
LOG.debug("Reading success data from {}", path);
ManifestSuccessData instance = serializer().load(fs, path);
instance.validate();
return instance;
}
/**
* Get a JSON serializer for this class.
* @return a serializer.
*/
public static JsonSerialization<ManifestSuccessData> serializer() {
return new JsonSerialization<>(ManifestSuccessData.class, false, true);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
/** @return timestamp of creation. */
public long getTimestamp() {
return timestamp;
}
public void setTimestamp(long timestamp) {
this.timestamp = timestamp;
}
/** @return timestamp as date; no expectation of parseability. */
public String getDate() {
return date;
}
public void setDate(String date) {
this.date = date;
}
/**
* @return host which created the file (implicitly: committed the work).
*/
public String getHostname() {
return hostname;
}
public void setHostname(String hostname) {
this.hostname = hostname;
}
/**
* @return committer name.
*/
public String getCommitter() {
return committer;
}
public void setCommitter(String committer) {
this.committer = committer;
}
/**
* @return any description text.
*/
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
/**
* @return any metrics.
*/
public Map<String, Long> getMetrics() {
return metrics;
}
public void setMetrics(TreeMap<String, Long> metrics) {
this.metrics = metrics;
}
/**
* @return a list of filenames in the commit.
*/
public List<String> getFilenames() {
return filenames;
}
/**
* Get the list of filenames as paths.
* @return the paths.
*/
@JsonIgnore
public List<Path> getFilenamePaths() {
return getFilenames().stream()
.map(AbstractManifestData::unmarshallPath)
.collect(Collectors.toList());
}
/**
* Set the list of filename paths.
*/
@JsonIgnore
public void setFilenamePaths(List<Path> paths) {
setFilenames(new ArrayList<>(
paths.stream()
.map(AbstractManifestData::marshallPath)
.collect(Collectors.toList())));
}
public void setFilenames(ArrayList<String> filenames) {
this.filenames = filenames;
}
public Map<String, String> getDiagnostics() {
return diagnostics;
}
public void setDiagnostics(TreeMap<String, String> diagnostics) {
this.diagnostics = diagnostics;
}
/**
* Add a diagnostics entry.
* @param key name
* @param value value
*/
public void putDiagnostic(String key, String value) {
diagnostics.put(key, value);
}
/** @return Job ID, if known. */
public String getJobId() {
return jobId;
}
public void setJobId(String jobId) {
this.jobId = jobId;
}
public String getJobIdSource() {
return jobIdSource;
}
public void setJobIdSource(final String jobIdSource) {
this.jobIdSource = jobIdSource;
}
@Override
public IOStatisticsSnapshot getIOStatistics() {
return iostatistics;
}
public void setIOStatistics(final IOStatisticsSnapshot ioStatistics) {
this.iostatistics = ioStatistics;
}
/**
* Set the IOStatistics to a snapshot of the source.
* @param iostats. Statistics; may be null.
*/
public void snapshotIOStatistics(IOStatistics iostats) {
setIOStatistics(IOStatisticsSupport.snapshotIOStatistics(iostats));
}
/**
* Set the success flag.
* @param success did the job succeed?
*/
public void setSuccess(boolean success) {
this.success = success;
}
/**
* Get the success flag.
* @return did the job succeed?
*/
public boolean getSuccess() {
return success;
}
public String getState() {
return state;
}
public void setState(String state) {
this.state = state;
}
public String getStage() {
return stage;
}
/**
* Note a failure by setting success flag to false,
* then add the exception to the diagnostics.
* @param thrown throwable
*/
public void recordJobFailure(Throwable thrown) {
setSuccess(false);
String stacktrace = ExceptionUtils.getStackTrace(thrown);
diagnostics.put(DiagnosticKeys.EXCEPTION, thrown.toString());
diagnostics.put(DiagnosticKeys.STACKTRACE, stacktrace);
}
}
| ManifestSuccessData |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableCheckerTest.java | {
"start": 32749,
"end": 32980
} | interface ____<T> {
T get(int i);
}
""")
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.annotations.Immutable;
public | MyList |
java | apache__dubbo | dubbo-compatible/src/main/java/com/alibaba/dubbo/container/page/pages/StatusPageHandler.java | {
"start": 1469,
"end": 3538
} | class ____ implements PageHandler {
@Override
public Page handle(URL url) {
List<List<String>> rows = new ArrayList<>();
Set<String> names =
ExtensionLoader.getExtensionLoader(StatusChecker.class).getSupportedExtensions();
Map<String, Status> statuses = new HashMap<>();
for (String name : names) {
StatusChecker checker =
ExtensionLoader.getExtensionLoader(StatusChecker.class).getExtension(name);
List<String> row = new ArrayList<>();
row.add(name);
Status status = checker.check();
if (status != null && !Status.Level.UNKNOWN.equals(status.getLevel())) {
statuses.put(name, status);
row.add(getLevelHtml(status.getLevel()));
row.add(status.getMessage());
rows.add(row);
}
}
Status status = StatusUtils.getSummaryStatus(statuses);
if ("status".equals(url.getPath())) {
return new Page("", "", "", status.getLevel().toString());
} else {
List<String> row = new ArrayList<>();
row.add("summary");
row.add(getLevelHtml(status.getLevel()));
row.add("<a href=\"/status\" target=\"_blank\">summary</a>");
rows.add(row);
return new Page(
"Status (<a href=\"/status\" target=\"_blank\">summary</a>)",
"Status",
new String[] {"Name", "Status", "Description"},
rows);
}
}
private String getLevelHtml(Status.Level level) {
return "<font color=\"" + getLevelColor(level) + "\">" + level.name() + "</font>";
}
private String getLevelColor(Status.Level level) {
if (level == Status.Level.OK) {
return "green";
} else if (level == Status.Level.ERROR) {
return "red";
} else if (level == Status.Level.WARN) {
return "yellow";
}
return "gray";
}
}
| StatusPageHandler |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/main/java/org/springframework/boot/devtools/restart/classloader/ClassLoaderFile.java | {
"start": 2707,
"end": 2738
} | class ____ files.
*/
public | load |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskExecutorChannelStateExecutorFactoryManagerTest.java | {
"start": 1220,
"end": 3080
} | class ____ {
@Test
void testReuseFactory() {
TaskExecutorChannelStateExecutorFactoryManager manager =
new TaskExecutorChannelStateExecutorFactoryManager();
JobID jobID = new JobID();
ChannelStateWriteRequestExecutorFactory factory = manager.getOrCreateExecutorFactory(jobID);
assertThat(manager.getOrCreateExecutorFactory(jobID))
.as("Same job should share the executor factory.")
.isSameAs(factory);
assertThat(manager.getOrCreateExecutorFactory(new JobID()))
.as("Different jobs cannot share executor factory.")
.isNotSameAs(factory);
manager.shutdown();
}
@Test
void testReleaseForJob() {
TaskExecutorChannelStateExecutorFactoryManager manager =
new TaskExecutorChannelStateExecutorFactoryManager();
JobID jobID = new JobID();
assertThat(manager.getFactoryByJobId(jobID)).isNull();
manager.getOrCreateExecutorFactory(jobID);
assertThat(manager.getFactoryByJobId(jobID)).isNotNull();
manager.releaseResourcesForJob(jobID);
assertThat(manager.getFactoryByJobId(jobID)).isNull();
manager.shutdown();
}
@Test
void testShutdown() {
TaskExecutorChannelStateExecutorFactoryManager manager =
new TaskExecutorChannelStateExecutorFactoryManager();
JobID jobID = new JobID();
manager.getOrCreateExecutorFactory(jobID);
manager.shutdown();
assertThatThrownBy(() -> manager.getOrCreateExecutorFactory(jobID))
.isInstanceOf(IllegalStateException.class);
assertThatThrownBy(() -> manager.getOrCreateExecutorFactory(new JobID()))
.isInstanceOf(IllegalStateException.class);
}
}
| TaskExecutorChannelStateExecutorFactoryManagerTest |
java | apache__camel | components/camel-milo/src/main/java/org/apache/camel/component/milo/MiloConstants.java | {
"start": 897,
"end": 1526
} | class ____ {
// The schemes
public static final String SCHEME_BROWSE = "milo-browse";
public static final String SCHEME_CLIENT = "milo-client";
public static final String SCHEME_SERVER = "milo-server";
@Metadata(label = "producer", description = "The node ids.", javaType = "List")
public static final String HEADER_NODE_IDS = "CamelMiloNodeIds";
@Metadata(label = "producer", description = "The \"await\" setting for writes.", javaType = "Boolean",
applicableFor = SCHEME_CLIENT)
public static final String HEADER_AWAIT = "await";
private MiloConstants() {
}
}
| MiloConstants |
java | apache__camel | components/camel-fhir/camel-fhir-component/src/generated/java/org/apache/camel/component/fhir/FhirHistoryEndpointConfiguration.java | {
"start": 2184,
"end": 7432
} | class ____ extends FhirConfiguration {
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "onInstance", description="Request that the server return only up to theCount number of resources, may be NULL"), @ApiMethod(methodName = "onServer", description="Request that the server return only up to theCount number of resources, may be NULL"), @ApiMethod(methodName = "onType", description="Request that the server return only up to theCount number of resources, may be NULL")})
private Integer count;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "onInstance", description="Request that the server return only resource versions that were created at or after the given time (inclusive), may be NULL"), @ApiMethod(methodName = "onServer", description="Request that the server return only resource versions that were created at or after the given time (inclusive), may be NULL"), @ApiMethod(methodName = "onType", description="Request that the server return only resource versions that were created at or after the given time (inclusive), may be NULL")})
private java.util.Date cutoff;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "onInstance", description="See ExtraParameters for a full list of parameters that can be passed, may be NULL"), @ApiMethod(methodName = "onServer", description="See ExtraParameters for a full list of parameters that can be passed, may be NULL"), @ApiMethod(methodName = "onType", description="See ExtraParameters for a full list of parameters that can be passed, may be NULL")})
private java.util.Map<org.apache.camel.component.fhir.api.ExtraParameters, Object> extraParameters;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "onInstance", description="Request that the server return only resource versions that were created at or after the given time (inclusive), may be NULL"), @ApiMethod(methodName = "onServer", description="Request that the server return only resource versions that were created at or after the given time (inclusive), may be NULL"), @ApiMethod(methodName = "onType", description="Request that the server return only resource versions that were created at or after the given time (inclusive), may be NULL")})
private org.hl7.fhir.instance.model.api.IPrimitiveType<java.util.Date> iCutoff;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "onInstance", description="The IIdType which must be populated with both a resource type and a resource ID at")})
private org.hl7.fhir.instance.model.api.IIdType id;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "onType", description="The resource type to search for")})
private Class<org.hl7.fhir.instance.model.api.IBaseResource> resourceType;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "onInstance", description="Request that the method return a Bundle resource (such as ca.uhn.fhir.model.dstu2.resource.Bundle). Use this method if you are accessing a DSTU2 server."), @ApiMethod(methodName = "onServer", description="Request that the method return a Bundle resource (such as ca.uhn.fhir.model.dstu2.resource.Bundle). Use this method if you are accessing a DSTU2 server."), @ApiMethod(methodName = "onType", description="Request that the method return a Bundle resource (such as ca.uhn.fhir.model.dstu2.resource.Bundle). Use this method if you are accessing a DSTU2 server.")})
private Class<org.hl7.fhir.instance.model.api.IBaseBundle> returnType;
public Integer getCount() {
return count;
}
public void setCount(Integer count) {
this.count = count;
}
public java.util.Date getCutoff() {
return cutoff;
}
public void setCutoff(java.util.Date cutoff) {
this.cutoff = cutoff;
}
public java.util.Map<org.apache.camel.component.fhir.api.ExtraParameters, Object> getExtraParameters() {
return extraParameters;
}
public void setExtraParameters(java.util.Map<org.apache.camel.component.fhir.api.ExtraParameters, Object> extraParameters) {
this.extraParameters = extraParameters;
}
public org.hl7.fhir.instance.model.api.IPrimitiveType<java.util.Date> getICutoff() {
return iCutoff;
}
public void setICutoff(org.hl7.fhir.instance.model.api.IPrimitiveType<java.util.Date> iCutoff) {
this.iCutoff = iCutoff;
}
public org.hl7.fhir.instance.model.api.IIdType getId() {
return id;
}
public void setId(org.hl7.fhir.instance.model.api.IIdType id) {
this.id = id;
}
public Class<org.hl7.fhir.instance.model.api.IBaseResource> getResourceType() {
return resourceType;
}
public void setResourceType(Class<org.hl7.fhir.instance.model.api.IBaseResource> resourceType) {
this.resourceType = resourceType;
}
public Class<org.hl7.fhir.instance.model.api.IBaseBundle> getReturnType() {
return returnType;
}
public void setReturnType(Class<org.hl7.fhir.instance.model.api.IBaseBundle> returnType) {
this.returnType = returnType;
}
}
| FhirHistoryEndpointConfiguration |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/imports/sourcewithextendsbound/astronautmapper/AstronautMapper.java | {
"start": 466,
"end": 627
} | interface ____ {
AstronautMapper INSTANCE = Mappers.getMapper( AstronautMapper.class );
AstronautDto astronautToDto(Astronaut astronaut);
}
| AstronautMapper |
java | elastic__elasticsearch | modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/RussianAnalyzerProvider.java | {
"start": 883,
"end": 1453
} | class ____ extends AbstractIndexAnalyzerProvider<RussianAnalyzer> {
private final RussianAnalyzer analyzer;
RussianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(name);
analyzer = new RussianAnalyzer(
Analysis.parseStopWords(env, settings, RussianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
);
}
@Override
public RussianAnalyzer get() {
return this.analyzer;
}
}
| RussianAnalyzerProvider |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java | {
"start": 1684,
"end": 3972
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestSeveralNameNodes.class);
/** ms between failovers between NNs */
private static final int TIME_BETWEEN_FAILOVERS = 1000;
private static final int NUM_NAMENODES = 3;
private static final int NUM_THREADS = 3;
private static final int LIST_LENGTH = 50;
/** ms for length of test */
private static final long RUNTIME = 100000;
@Test
public void testCircularLinkedListWrites() throws Exception {
HAStressTestHarness harness = new HAStressTestHarness();
// setup the harness
harness.setNumberOfNameNodes(NUM_NAMENODES);
harness.addFailoverThread(TIME_BETWEEN_FAILOVERS);
harness.conf.setInt(HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY, 1000);
harness.conf.setInt(HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY, 128);
final MiniDFSCluster cluster = harness.startCluster();
try {
cluster.waitActive();
cluster.transitionToActive(0);
// setup the a circular writer
FileSystem fs = harness.getFailoverFs();
TestContext context = harness.testCtx;
List<CircularWriter> writers = new ArrayList<CircularWriter>();
for (int i = 0; i < NUM_THREADS; i++) {
Path p = new Path("/test-" + i);
fs.mkdirs(p);
CircularWriter writer = new CircularWriter(context, LIST_LENGTH, fs, p);
writers.add(writer);
context.addThread(writer);
}
harness.startThreads();
// wait for all the writer threads to finish, or that we exceed the time
long start = System.currentTimeMillis();
while ((System.currentTimeMillis() - start) < RUNTIME &&
writers.size() > 0) {
for (int i = 0; i < writers.size(); i++) {
CircularWriter writer = writers.get(i);
// remove the writer from the ones to check
if (writer.done.await(100, TimeUnit.MILLISECONDS)) {
writers.remove(i--);
}
}
}
assertEquals(0, writers.size(),
"Some writers didn't complete in expected runtime! Current writer state:" + writers);
harness.stopThreads();
} finally {
System.err.println("===========================\n\n\n\n");
harness.shutdown();
}
}
private static | TestSeveralNameNodes |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/AbstractNamedWriteableTestCase.java | {
"start": 1146,
"end": 1748
} | class ____<T extends NamedWriteable> extends AbstractWireTestCase<T> {
// Force subclasses to override to customize the registry for their NamedWriteable
@Override
protected abstract NamedWriteableRegistry getNamedWriteableRegistry();
/**
* The type of {@link NamedWriteable} to read.
*/
protected abstract Class<T> categoryClass();
@Override
protected T copyInstance(T instance, TransportVersion version) throws IOException {
return copyNamedWriteable(instance, getNamedWriteableRegistry(), categoryClass(), version);
}
}
| AbstractNamedWriteableTestCase |
java | quarkusio__quarkus | extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/boot/xml/JAXBElementSubstitution.java | {
"start": 287,
"end": 746
} | class ____ implements ObjectSubstitution<JAXBElement, JAXBElementSubstitution.Serialized> {
@Override
public Serialized serialize(JAXBElement obj) {
return new Serialized(obj.getName(), obj.getDeclaredType(), obj.getScope(), obj.getValue());
}
@Override
public JAXBElement deserialize(Serialized obj) {
return new JAXBElement(obj.name, obj.declaredType, obj.scope, obj.value);
}
public static | JAXBElementSubstitution |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/params/ParameterizedClassIntegrationTests.java | {
"start": 45599,
"end": 45775
} | class ____ {
@Parameter
String value;
@Test
void test() {
assertTrue(value.equals("foo") || value.equals("bar"));
}
}
static | ArgumentsSourceFieldInjectionTestCase |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/job/SubtaskCurrentAttemptDetailsHandler.java | {
"start": 2294,
"end": 4757
} | class ____
extends AbstractSubtaskHandler<
SubtaskExecutionAttemptDetailsInfo, SubtaskMessageParameters> {
private final MetricFetcher metricFetcher;
public SubtaskCurrentAttemptDetailsHandler(
GatewayRetriever<? extends RestfulGateway> leaderRetriever,
Duration timeout,
Map<String, String> responseHeaders,
MessageHeaders<
EmptyRequestBody,
SubtaskExecutionAttemptDetailsInfo,
SubtaskMessageParameters>
messageHeaders,
ExecutionGraphCache executionGraphCache,
Executor executor,
MetricFetcher metricFetcher) {
super(
leaderRetriever,
timeout,
responseHeaders,
messageHeaders,
executionGraphCache,
executor);
this.metricFetcher = Preconditions.checkNotNull(metricFetcher);
}
@Override
protected SubtaskExecutionAttemptDetailsInfo handleRequest(
HandlerRequest<EmptyRequestBody> request, AccessExecutionVertex executionVertex)
throws RestHandlerException {
final AccessExecution execution = executionVertex.getCurrentExecutionAttempt();
final JobID jobID = request.getPathParameter(JobIDPathParameter.class);
final JobVertexID jobVertexID = request.getPathParameter(JobVertexIdPathParameter.class);
final Collection<AccessExecution> attempts = executionVertex.getCurrentExecutions();
List<SubtaskExecutionAttemptDetailsInfo> otherConcurrentAttempts = null;
metricFetcher.update();
MetricStore.JobMetricStoreSnapshot jobMetrics = metricFetcher.getMetricStore().getJobs();
if (attempts.size() > 1) {
otherConcurrentAttempts = new ArrayList<>();
for (AccessExecution attempt : attempts) {
if (attempt.getAttemptNumber() != execution.getAttemptNumber()) {
otherConcurrentAttempts.add(
SubtaskExecutionAttemptDetailsInfo.create(
attempt, jobMetrics, jobID, jobVertexID, null));
}
}
}
return SubtaskExecutionAttemptDetailsInfo.create(
execution, jobMetrics, jobID, jobVertexID, otherConcurrentAttempts);
}
}
| SubtaskCurrentAttemptDetailsHandler |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/main/java/org/apache/camel/spring/xml/SpringErrorHandlerType.java | {
"start": 1358,
"end": 1551
} | enum ____ {
DefaultErrorHandler,
DeadLetterChannel,
NoErrorHandler,
TransactionErrorHandler;
/**
* Get the type as class.
*
* @return the | SpringErrorHandlerType |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/internal/util/SerializationHelper.java | {
"start": 8860,
"end": 8915
} | class ____ we should use.
*/
private static final | loader |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/ai/remote/request/QueryAgentCardRequest.java | {
"start": 751,
"end": 1253
} | class ____ extends AbstractAgentRequest {
private String version;
private String registrationType;
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public String getRegistrationType() {
return registrationType;
}
public void setRegistrationType(String registrationType) {
this.registrationType = registrationType;
}
}
| QueryAgentCardRequest |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionEvaluationReport.java | {
"start": 9111,
"end": 10144
} | class ____ {
private final Condition condition;
private final ConditionOutcome outcome;
public ConditionAndOutcome(Condition condition, ConditionOutcome outcome) {
this.condition = condition;
this.outcome = outcome;
}
public Condition getCondition() {
return this.condition;
}
public ConditionOutcome getOutcome() {
return this.outcome;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
ConditionAndOutcome other = (ConditionAndOutcome) obj;
return (ObjectUtils.nullSafeEquals(this.condition.getClass(), other.condition.getClass())
&& ObjectUtils.nullSafeEquals(this.outcome, other.outcome));
}
@Override
public int hashCode() {
return this.condition.getClass().hashCode() * 31 + this.outcome.hashCode();
}
@Override
public String toString() {
return this.condition.getClass() + " " + this.outcome;
}
}
private static final | ConditionAndOutcome |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/aot/generate/DefaultGenerationContextTests.java | {
"start": 1197,
"end": 5931
} | class ____ {
private static final ClassName SAMPLE_TARGET = ClassName.get("com.example", "SampleTarget");
private static final Consumer<TypeSpec.Builder> typeSpecCustomizer = type -> {};
private final GeneratedClasses generatedClasses = new GeneratedClasses(
new ClassNameGenerator(SAMPLE_TARGET));
private final InMemoryGeneratedFiles generatedFiles = new InMemoryGeneratedFiles();
private final RuntimeHints runtimeHints = new RuntimeHints();
@Test
void createWithOnlyGeneratedFilesCreatesContext() {
DefaultGenerationContext context = new DefaultGenerationContext(
new ClassNameGenerator(SAMPLE_TARGET), this.generatedFiles);
assertThat(context.getGeneratedFiles()).isSameAs(this.generatedFiles);
assertThat(context.getRuntimeHints()).isInstanceOf(RuntimeHints.class);
}
@Test
void createCreatesContext() {
DefaultGenerationContext context = new DefaultGenerationContext(
this.generatedClasses, this.generatedFiles, this.runtimeHints);
assertThat(context.getGeneratedFiles()).isNotNull();
assertThat(context.getRuntimeHints()).isNotNull();
}
@Test
void createWhenGeneratedClassesIsNullThrowsException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new DefaultGenerationContext((GeneratedClasses) null,
this.generatedFiles, this.runtimeHints))
.withMessage("'generatedClasses' must not be null");
}
@Test
void createWhenGeneratedFilesIsNullThrowsException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new DefaultGenerationContext(this.generatedClasses,
null, this.runtimeHints))
.withMessage("'generatedFiles' must not be null");
}
@Test
void createWhenRuntimeHintsIsNullThrowsException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new DefaultGenerationContext(this.generatedClasses,
this.generatedFiles, null))
.withMessage("'runtimeHints' must not be null");
}
@Test
void getGeneratedClassesReturnsClassNameGenerator() {
DefaultGenerationContext context = new DefaultGenerationContext(
this.generatedClasses, this.generatedFiles, this.runtimeHints);
assertThat(context.getGeneratedClasses()).isSameAs(this.generatedClasses);
}
@Test
void getGeneratedFilesReturnsGeneratedFiles() {
DefaultGenerationContext context = new DefaultGenerationContext(
this.generatedClasses, this.generatedFiles, this.runtimeHints);
assertThat(context.getGeneratedFiles()).isSameAs(this.generatedFiles);
}
@Test
void getRuntimeHintsReturnsRuntimeHints() {
DefaultGenerationContext context = new DefaultGenerationContext(
this.generatedClasses, this.generatedFiles, this.runtimeHints);
assertThat(context.getRuntimeHints()).isSameAs(this.runtimeHints);
}
@Test
void withNameUpdateNamingConvention() {
DefaultGenerationContext context = new DefaultGenerationContext(
new ClassNameGenerator(SAMPLE_TARGET), this.generatedFiles);
GenerationContext anotherContext = context.withName("Another");
GeneratedClass generatedClass = anotherContext.getGeneratedClasses()
.addForFeature("Test", typeSpecCustomizer);
assertThat(generatedClass.getName().simpleName()).endsWith("__AnotherTest");
}
@Test
void withNameKeepsTrackOfAllGeneratedFiles() {
DefaultGenerationContext context = new DefaultGenerationContext(
new ClassNameGenerator(SAMPLE_TARGET), this.generatedFiles);
context.getGeneratedClasses().addForFeature("Test", typeSpecCustomizer);
GenerationContext anotherContext = context.withName("Another");
assertThat(anotherContext.getGeneratedClasses()).isNotSameAs(context.getGeneratedClasses());
assertThat(anotherContext.getGeneratedFiles()).isSameAs(context.getGeneratedFiles());
assertThat(anotherContext.getRuntimeHints()).isSameAs(context.getRuntimeHints());
anotherContext.getGeneratedClasses().addForFeature("Test", typeSpecCustomizer);
context.writeGeneratedContent();
assertThat(this.generatedFiles.getGeneratedFiles(Kind.SOURCE)).hasSize(2);
}
@Test
void withNameGeneratesUniqueName() {
DefaultGenerationContext context = new DefaultGenerationContext(
new ClassNameGenerator(SAMPLE_TARGET), this.generatedFiles);
context.withName("Test").getGeneratedClasses()
.addForFeature("Feature", typeSpecCustomizer);
context.withName("Test").getGeneratedClasses()
.addForFeature("Feature", typeSpecCustomizer);
context.withName("Test").getGeneratedClasses()
.addForFeature("Feature", typeSpecCustomizer);
context.writeGeneratedContent();
assertThat(this.generatedFiles.getGeneratedFiles(Kind.SOURCE)).containsOnlyKeys(
"com/example/SampleTarget__TestFeature.java",
"com/example/SampleTarget__Test1Feature.java",
"com/example/SampleTarget__Test2Feature.java");
}
}
| DefaultGenerationContextTests |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/StreamingRuntimeContextTest.java | {
"start": 18403,
"end": 32103
} | class ____ really registered, i.e., the execution config was applied
assertThat(serializer).isInstanceOf(KryoSerializer.class);
assertThat(((KryoSerializer<?>) serializer).getKryo().getRegistration(Path.class).getId())
.isPositive();
}
// ------------------------------------------------------------------------
//
// ------------------------------------------------------------------------
private StreamingRuntimeContext createMapOperatorRuntimeContext() throws Exception {
AbstractStreamOperator<?> mapPlainMockOp = createMapPlainMockOp();
return createRuntimeContext(mapPlainMockOp);
}
private StreamingRuntimeContext createRuntimeContext() throws Exception {
return new StreamingRuntimeContext(
createListPlainMockOp(), MockEnvironment.builder().build(), Collections.emptyMap());
}
private StreamingRuntimeContext createRuntimeContext(
AtomicReference<Object> descriptorCapture, ExecutionConfig config, boolean stateV2)
throws Exception {
return createDescriptorCapturingMockOp(
descriptorCapture,
config,
MockEnvironment.builder().setExecutionConfig(config).build(),
stateV2)
.getRuntimeContext();
}
private StreamingRuntimeContext createRuntimeContext(AbstractStreamOperator<?> operator) {
return new StreamingRuntimeContext(
MockEnvironment.builder().build(),
Collections.emptyMap(),
operator.getMetricGroup(),
operator.getOperatorID(),
operator.getProcessingTimeService(),
operator.getKeyedStateStore(),
ExternalResourceInfoProvider.NO_EXTERNAL_RESOURCES);
}
@SuppressWarnings("unchecked")
private static AbstractStreamOperator<?> createDescriptorCapturingMockOp(
final AtomicReference<Object> ref,
final ExecutionConfig config,
Environment environment,
boolean stateV2)
throws Exception {
StreamConfig streamConfig = new StreamConfig(new Configuration());
streamConfig.setOperatorID(new OperatorID());
AbstractStreamOperator<?> operator =
new AbstractStreamOperator<Object>(
new StreamOperatorParameters<>(
new MockStreamTaskBuilder(environment)
.setExecutionConfig(config)
.build(),
streamConfig,
new CollectorOutput<>(new ArrayList<>()),
TestProcessingTimeService::new,
null,
null)) {
@Override
protected void setup(
StreamTask<?, ?> containingTask,
StreamConfig config,
Output<StreamRecord<Object>> output) {
super.setup(containingTask, config, output);
}
};
StreamTaskStateInitializer streamTaskStateManager =
new StreamTaskStateInitializerImpl(environment, new HashMapStateBackend());
KeyedStateBackend keyedStateBackend = mock(KeyedStateBackend.class);
AsyncKeyedStateBackend asyncKeyedStateBackend = mock(AsyncKeyedStateBackend.class);
DefaultKeyedStateStore keyedStateStore =
new DefaultKeyedStateStore(
keyedStateBackend,
asyncKeyedStateBackend,
new SerializerFactory() {
@Override
public <T> TypeSerializer<T> createSerializer(
TypeInformation<T> typeInformation) {
return typeInformation.createSerializer(
config.getSerializerConfig());
}
});
doAnswer(
(Answer<Object>)
invocationOnMock -> {
ref.set(invocationOnMock.getArguments()[2]);
return null;
})
.when(keyedStateBackend)
.getPartitionedState(
ArgumentMatchers.any(),
any(TypeSerializer.class),
any(StateDescriptor.class));
doAnswer(
(Answer<Object>)
invocationOnMock -> {
ref.set(invocationOnMock.getArguments()[2]);
return null;
})
.when(asyncKeyedStateBackend)
.getOrCreateKeyedState(
any(),
any(TypeSerializer.class),
any(org.apache.flink.api.common.state.v2.StateDescriptor.class));
operator.initializeState(streamTaskStateManager);
if (stateV2) {
keyedStateStore.setSupportKeyedStateApiSetV2();
}
operator.getRuntimeContext().setKeyedStateStore(keyedStateStore);
return operator;
}
@SuppressWarnings("unchecked")
private static AbstractStreamOperator<?> createListPlainMockOp() throws Exception {
AbstractStreamOperator<?> operatorMock = mock(AbstractStreamOperator.class);
ExecutionConfig config = new ExecutionConfig();
KeyedStateBackend keyedStateBackend = mock(KeyedStateBackend.class);
DefaultKeyedStateStore keyedStateStore =
new DefaultKeyedStateStore(
keyedStateBackend,
new SerializerFactory() {
@Override
public <T> TypeSerializer<T> createSerializer(
TypeInformation<T> typeInformation) {
return typeInformation.createSerializer(
config.getSerializerConfig());
}
});
when(operatorMock.getExecutionConfig()).thenReturn(config);
doAnswer(
new Answer<ListState<String>>() {
@Override
public ListState<String> answer(InvocationOnMock invocationOnMock)
throws Throwable {
ListStateDescriptor<String> descr =
(ListStateDescriptor<String>)
invocationOnMock.getArguments()[2];
AbstractStateBackend abstractStateBackend =
new HashMapStateBackend();
Environment env = new DummyEnvironment("test_task", 1, 0);
JobID jobID = new JobID();
KeyGroupRange keyGroupRange = new KeyGroupRange(0, 0);
TaskKvStateRegistry kvStateRegistry =
new KvStateRegistry()
.createTaskRegistry(new JobID(), new JobVertexID());
CloseableRegistry cancelStreamRegistry = new CloseableRegistry();
AbstractKeyedStateBackend<Integer> backend =
abstractStateBackend.createKeyedStateBackend(
new KeyedStateBackendParametersImpl<>(
env,
jobID,
"test_op",
IntSerializer.INSTANCE,
1,
keyGroupRange,
kvStateRegistry,
TtlTimeProvider.DEFAULT,
new UnregisteredMetricsGroup(),
Collections.emptyList(),
cancelStreamRegistry));
backend.setCurrentKey(0);
return backend.getPartitionedState(
VoidNamespace.INSTANCE,
VoidNamespaceSerializer.INSTANCE,
descr);
}
})
.when(keyedStateBackend)
.getPartitionedState(
ArgumentMatchers.any(),
any(TypeSerializer.class),
any(ListStateDescriptor.class));
when(operatorMock.getKeyedStateStore()).thenReturn(keyedStateStore);
when(operatorMock.getOperatorID()).thenReturn(new OperatorID());
return operatorMock;
}
@SuppressWarnings("unchecked")
private static AbstractStreamOperator<?> createMapPlainMockOp() throws Exception {
AbstractStreamOperator<?> operatorMock = mock(AbstractStreamOperator.class);
ExecutionConfig config = new ExecutionConfig();
KeyedStateBackend keyedStateBackend = mock(KeyedStateBackend.class);
DefaultKeyedStateStore keyedStateStore =
new DefaultKeyedStateStore(
keyedStateBackend,
new SerializerFactory() {
@Override
public <T> TypeSerializer<T> createSerializer(
TypeInformation<T> typeInformation) {
return typeInformation.createSerializer(
config.getSerializerConfig());
}
});
when(operatorMock.getExecutionConfig()).thenReturn(config);
doAnswer(
new Answer<MapState<Integer, String>>() {
@Override
public MapState<Integer, String> answer(
InvocationOnMock invocationOnMock) throws Throwable {
MapStateDescriptor<Integer, String> descr =
(MapStateDescriptor<Integer, String>)
invocationOnMock.getArguments()[2];
AbstractStateBackend abstractStateBackend =
new HashMapStateBackend();
Environment env = new DummyEnvironment("test_task", 1, 0);
JobID jobID = new JobID();
KeyGroupRange keyGroupRange = new KeyGroupRange(0, 0);
TaskKvStateRegistry kvStateRegistry =
new KvStateRegistry()
.createTaskRegistry(new JobID(), new JobVertexID());
CloseableRegistry cancelStreamRegistry = new CloseableRegistry();
AbstractKeyedStateBackend<Integer> backend =
abstractStateBackend.createKeyedStateBackend(
new KeyedStateBackendParametersImpl<>(
env,
jobID,
"test_op",
IntSerializer.INSTANCE,
1,
keyGroupRange,
kvStateRegistry,
TtlTimeProvider.DEFAULT,
new UnregisteredMetricsGroup(),
Collections.emptyList(),
cancelStreamRegistry));
backend.setCurrentKey(0);
return backend.getPartitionedState(
VoidNamespace.INSTANCE,
VoidNamespaceSerializer.INSTANCE,
descr);
}
})
.when(keyedStateBackend)
.getPartitionedState(
ArgumentMatchers.any(),
any(TypeSerializer.class),
any(MapStateDescriptor.class));
when(operatorMock.getKeyedStateStore()).thenReturn(keyedStateStore);
when(operatorMock.getOperatorID()).thenReturn(new OperatorID());
when(operatorMock.getProcessingTimeService()).thenReturn(new TestProcessingTimeService());
return operatorMock;
}
}
| is |
java | quarkusio__quarkus | extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/upgrade/AbstractHttpUpgradeCheckTestBase.java | {
"start": 1025,
"end": 2853
} | class ____ {
@Inject
Vertx vertx;
@TestHTTPResource("opening")
URI openingUri;
@TestHTTPResource("responding")
URI respondingUri;
@TestHTTPResource("rejecting")
URI rejectingUri;
@BeforeEach
public void cleanUp() {
Opening.OPENED.set(false);
OpeningHttpUpgradeCheck.INVOKED.set(0);
}
@Test
public void testHttpUpgradeRejected() {
try (WSClient client = new WSClient(vertx)) {
CompletionException ce = assertThrows(CompletionException.class,
() -> client.connect(
new WebSocketConnectOptions().addHeader(RejectingHttpUpgradeCheck.REJECT_HEADER, "ignored"),
rejectingUri));
Throwable root = ExceptionUtil.getRootCause(ce);
assertInstanceOf(UpgradeRejectedException.class, root);
assertTrue(root.getMessage().contains("403"), root.getMessage());
}
}
@Test
public void testHttpUpgradePermitted() {
try (WSClient client = new WSClient(vertx)) {
client.connect(openingUri);
Awaitility.await().atMost(Duration.ofSeconds(2)).until(() -> OpeningHttpUpgradeCheck.INVOKED.get() == 1);
}
}
@Test
public void testHttpUpgradeOkAndResponding() {
// test no HTTP Upgrade check rejected the upgrade or recorded value
try (WSClient client = new WSClient(vertx)) {
client.connect(new WebSocketConnectOptions(), respondingUri);
var response = client.sendAndAwaitReply("Ho").toString();
assertEquals("Ho Hey", response);
assertEquals(0, OpeningHttpUpgradeCheck.INVOKED.get());
}
}
@WebSocket(path = "/rejecting", endpointId = "rejecting-id")
public static | AbstractHttpUpgradeCheckTestBase |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/DenseEmbeddingResults.java | {
"start": 318,
"end": 689
} | interface ____<E extends EmbeddingResults.Embedding<E>> extends EmbeddingResults<E> {
/**
* Returns the first embedding entry in the result list's array size.
* @return the size of the embedding
* @throws IllegalStateException if the list of embeddings is empty
*/
int getFirstEmbeddingSize() throws IllegalStateException;
}
| DenseEmbeddingResults |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/servletapi/HttpServlet3RequestFactory.java | {
"start": 8146,
"end": 12273
} | class ____ extends SecurityContextHolderAwareRequestWrapper {
private final HttpServletResponse response;
Servlet3SecurityContextHolderAwareRequestWrapper(HttpServletRequest request, String rolePrefix,
HttpServletResponse response) {
super(request, HttpServlet3RequestFactory.this.trustResolver, rolePrefix);
this.response = response;
}
@Override
public @Nullable AsyncContext getAsyncContext() {
AsyncContext asyncContext = super.getAsyncContext();
if (asyncContext == null) {
return null;
}
return new SecurityContextAsyncContext(asyncContext);
}
@Override
public AsyncContext startAsync() {
AsyncContext startAsync = super.startAsync();
return new SecurityContextAsyncContext(startAsync);
}
@Override
public AsyncContext startAsync(ServletRequest servletRequest, ServletResponse servletResponse)
throws IllegalStateException {
AsyncContext startAsync = super.startAsync(servletRequest, servletResponse);
return new SecurityContextAsyncContext(startAsync);
}
@Override
public boolean authenticate(HttpServletResponse response) throws IOException, ServletException {
AuthenticationEntryPoint entryPoint = HttpServlet3RequestFactory.this.authenticationEntryPoint;
if (entryPoint == null) {
HttpServlet3RequestFactory.this.logger.debug(
"authenticationEntryPoint is null, so allowing original HttpServletRequest to handle authenticate");
return super.authenticate(response);
}
if (isAuthenticated()) {
return true;
}
entryPoint.commence(this, response,
new AuthenticationCredentialsNotFoundException("User is not Authenticated"));
return false;
}
@Override
public void login(String username, String password) throws ServletException {
if (isAuthenticated()) {
throw new ServletException("Cannot perform login for '" + username + "' already authenticated as '"
+ getRemoteUser() + "'");
}
AuthenticationManager authManager = HttpServlet3RequestFactory.this.authenticationManager;
if (authManager == null) {
HttpServlet3RequestFactory.this.logger
.debug("authenticationManager is null, so allowing original HttpServletRequest to handle login");
super.login(username, password);
return;
}
Authentication authentication = getAuthentication(authManager, username, password);
SecurityContext context = HttpServlet3RequestFactory.this.securityContextHolderStrategy
.createEmptyContext();
context.setAuthentication(authentication);
HttpServlet3RequestFactory.this.securityContextHolderStrategy.setContext(context);
HttpServlet3RequestFactory.this.securityContextRepository.saveContext(context, this, this.response);
}
private Authentication getAuthentication(AuthenticationManager authManager, String username, String password)
throws ServletException {
try {
UsernamePasswordAuthenticationToken authentication = UsernamePasswordAuthenticationToken
.unauthenticated(username, password);
Object details = HttpServlet3RequestFactory.this.authenticationDetailsSource.buildDetails(this);
authentication.setDetails(details);
return authManager.authenticate(authentication);
}
catch (AuthenticationException ex) {
HttpServlet3RequestFactory.this.securityContextHolderStrategy.clearContext();
throw new ServletException(ex.getMessage(), ex);
}
}
@Override
public void logout() throws ServletException {
List<LogoutHandler> handlers = HttpServlet3RequestFactory.this.logoutHandlers;
if (CollectionUtils.isEmpty(handlers)) {
HttpServlet3RequestFactory.this.logger
.debug("logoutHandlers is null, so allowing original HttpServletRequest to handle logout");
super.logout();
return;
}
Authentication authentication = HttpServlet3RequestFactory.this.securityContextHolderStrategy.getContext()
.getAuthentication();
for (LogoutHandler handler : handlers) {
handler.logout(this, this.response, authentication);
}
}
private boolean isAuthenticated() {
return getUserPrincipal() != null;
}
}
private static | Servlet3SecurityContextHolderAwareRequestWrapper |
java | apache__hadoop | hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRootDirLive.java | {
"start": 1073,
"end": 1299
} | class ____
extends AbstractContractRootDirectoryTest {
@Override
protected AbstractFSContract createContract(Configuration configuration) {
return new AdlStorageContract(configuration);
}
}
| TestAdlContractRootDirLive |
java | google__guava | android/guava-tests/test/com/google/common/util/concurrent/AbstractExecutionThreadServiceTest.java | {
"start": 12085,
"end": 13211
} | class ____ extends AbstractExecutionThreadService implements TearDown {
private final ExecutorService executor = newSingleThreadExecutor();
FakeService() {
tearDownStack.addTearDown(this);
}
volatile int startupCalled = 0;
volatile int shutdownCalled = 0;
volatile int runCalled = 0;
@Override
protected void startUp() throws Exception {
assertEquals(0, startupCalled);
assertEquals(0, runCalled);
assertEquals(0, shutdownCalled);
startupCalled++;
}
@Override
protected void run() throws Exception {
assertEquals(1, startupCalled);
assertEquals(0, runCalled);
assertEquals(0, shutdownCalled);
runCalled++;
}
@Override
protected void shutDown() throws Exception {
assertEquals(1, startupCalled);
assertEquals(0, shutdownCalled);
assertEquals(Service.State.STOPPING, state());
shutdownCalled++;
}
@Override
protected Executor executor() {
return executor;
}
@Override
public void tearDown() throws Exception {
executor.shutdown();
}
}
}
| FakeService |
java | apache__flink | flink-core/src/main/java/org/apache/flink/util/InstantiationUtil.java | {
"start": 5819,
"end": 6011
} | class ____ a snapshot and then
* its serialVersionUID is changed in an uncontrolled way. This lets us deserialize the old
* snapshot assuming the binary representation of the faulty | into |
java | junit-team__junit5 | junit-jupiter-api/src/main/java/org/junit/jupiter/api/Timeout.java | {
"start": 873,
"end": 1170
} | class ____ its {@link Nested @Nested} classes.
*
* <p>This annotation may also be used on lifecycle methods annotated with
* {@link BeforeAll @BeforeAll}, {@link BeforeEach @BeforeEach},
* {@link AfterEach @AfterEach}, or {@link AfterAll @AfterAll}.
*
* <p>Applying this annotation to a test | and |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_2600/Issue2628.java | {
"start": 147,
"end": 1019
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
long MAX_LONG = Long.MAX_VALUE; //9223372036854775807
long MIN_LONG = Long.MIN_VALUE; //-9223372036854775808
String s1 = "9423372036854775807"; //-9423372036854775808
BigInteger bi1 = JSON.parseObject(s1, BigInteger.class); //没问题
assertEquals("9423372036854775807", bi1.toString());
BigInteger bi2 = new BigInteger(s1); //没问题
assertEquals("9423372036854775807", bi2.toString());
Tobject tobj1 = new Tobject();
tobj1.setBi(bi2); //没问题
assertEquals("9423372036854775807", tobj1.getBi().toString());;
String s2 = JSON.toJSONString(tobj1);
Tobject tobj2 = JSON.parseObject(s2, Tobject.class); //有问题
assertEquals("9423372036854775807", tobj2.getBi().toString());
}
static | Issue2628 |
java | quarkusio__quarkus | extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/config/build/exporter/OtlpExporterBuildConfig.java | {
"start": 366,
"end": 698
} | interface ____ {
/**
* Legacy property kept for compatibility reasons. Just the defining the right exporter is enough.
* <p>
* Maps to quarkus.opentelemetry.tracer.exporter.otlp.enabled and will be removed in the future
*/
@Deprecated
@WithDefault("true")
boolean enabled();
}
| OtlpExporterBuildConfig |
java | apache__kafka | coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorTimer.java | {
"start": 1291,
"end": 4667
} | class ____<T, U> implements CoordinatorTimer<T, U> {
/**
* Represents a scheduled timeout.
*/
public record ScheduledTimeout<T, U>(String key, long deadlineMs, TimeoutOperation<T, U> operation) {
}
/**
* Represents an expired timeout.
*/
public record ExpiredTimeout<T, U>(String key, CoordinatorResult<T, U> result) {
}
private final Time time;
private final Map<String, ScheduledTimeout<T, U>> timeoutMap = new HashMap<>();
private final PriorityQueue<ScheduledTimeout<T, U>> timeoutQueue = new PriorityQueue<>(
Comparator.comparingLong(entry -> entry.deadlineMs)
);
public MockCoordinatorTimer(Time time) {
this.time = time;
}
/**
* Schedules a timeout.
*/
@Override
public void schedule(
String key,
long delay,
TimeUnit unit,
boolean retry,
long retryBackoff,
TimeoutOperation<T, U> operation
) {
cancel(key);
long deadlineMs = time.milliseconds() + unit.toMillis(delay);
ScheduledTimeout<T, U> timeout = new ScheduledTimeout<>(key, deadlineMs, operation);
timeoutQueue.add(timeout);
timeoutMap.put(key, timeout);
}
@Override
public void schedule(
String key,
long delay,
TimeUnit unit,
boolean retry,
TimeoutOperation<T, U> operation
) {
schedule(key, delay, unit, retry, 500L, operation);
}
@Override
public void scheduleIfAbsent(
String key,
long delay,
TimeUnit unit,
boolean retry,
TimeoutOperation<T, U> operation
) {
if (!timeoutMap.containsKey(key)) {
schedule(key, delay, unit, retry, 500L, operation);
}
}
/**
* Cancels a timeout.
*/
@Override
public void cancel(String key) {
ScheduledTimeout<T, U> timeout = timeoutMap.remove(key);
if (timeout != null) {
timeoutQueue.remove(timeout);
}
}
/**
* Checks if a timeout with the given key is scheduled.
*/
@Override
public boolean isScheduled(String key) {
return timeoutMap.containsKey(key);
}
/**
* @return True if a timeout with the key exists; false otherwise.
*/
public boolean contains(String key) {
return timeoutMap.containsKey(key);
}
/**
* @return The scheduled timeout for the key; null otherwise.
*/
public ScheduledTimeout<T, U> timeout(String key) {
return timeoutMap.get(key);
}
/**
* @return The number of scheduled timeouts.
*/
public int size() {
return timeoutMap.size();
}
/**
* @return A list of expired timeouts based on the current time.
*/
public List<ExpiredTimeout<T, U>> poll() {
List<ExpiredTimeout<T, U>> results = new ArrayList<>();
ScheduledTimeout<T, U> timeout = timeoutQueue.peek();
while (timeout != null && timeout.deadlineMs <= time.milliseconds()) {
timeoutQueue.poll();
timeoutMap.remove(timeout.key, timeout);
results.add(new ExpiredTimeout<>(
timeout.key,
timeout.operation.generateRecords()
));
timeout = timeoutQueue.peek();
}
return results;
}
}
| MockCoordinatorTimer |
java | quarkusio__quarkus | extensions/reactive-mssql-client/deployment/src/test/java/io/quarkus/reactive/mssql/client/MSSQLPoolCreatorTest.java | {
"start": 268,
"end": 951
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClass(CustomCredentialsProvider.class)
.addClass(CredentialsTestResource.class)
.addClass(LocalhostMSSQLPoolCreator.class)
.addAsResource("application-credentials-with-erroneous-url.properties", "application.properties"));
@Test
public void testConnect() {
given()
.when().get("/test")
.then()
.statusCode(200)
.body(CoreMatchers.equalTo("OK"));
}
}
| MSSQLPoolCreatorTest |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/main/java/io/quarkus/hibernate/orm/deployment/HibernateConfigUtil.java | {
"start": 250,
"end": 1963
} | class ____ {
/**
* TODO: reuse the ones from QuarkusInfinispanRegionFactory as soon as they are made public.
*/
private final static String EXPIRATION_MAX_IDLE = ".expiration.max-idle";
private final static String MEMORY_OBJECT_COUNT = ".memory.object-count";
private static final String HIBERNATE_CACHE_PREFIX = "hibernate.cache.";
public static Map<String, String> getCacheConfigEntries(HibernateOrmConfigPersistenceUnit config) {
Map<String, String> cacheRegionsConfigEntries = new HashMap<>();
for (Map.Entry<String, HibernateOrmConfigPersistenceUnitCache> regionEntry : config.cache().entrySet()) {
String regionName = regionEntry.getKey();
HibernateOrmConfigPersistenceUnitCache cacheConfig = regionEntry.getValue();
if (cacheConfig.expiration().maxIdle().isPresent()) {
cacheRegionsConfigEntries.put(getCacheConfigKey(regionName, EXPIRATION_MAX_IDLE),
String.valueOf(cacheConfig.expiration().maxIdle().get().getSeconds()));
}
if (cacheConfig.memory().objectCount().isPresent()) {
cacheRegionsConfigEntries.put(getCacheConfigKey(regionName, MEMORY_OBJECT_COUNT),
String.valueOf(cacheConfig.memory().objectCount().getAsLong()));
}
}
return cacheRegionsConfigEntries;
}
private static String getCacheConfigKey(String regionName, String configKey) {
return HIBERNATE_CACHE_PREFIX + regionName + configKey;
}
public static <T> OptionalInt firstPresent(OptionalInt first, OptionalInt second) {
return first.isPresent() ? first : second;
}
}
| HibernateConfigUtil |
java | alibaba__nacos | test/core-test/src/test/java/com/alibaba/nacos/test/core/auth/AuthBase.java | {
"start": 1317,
"end": 10952
} | class ____ extends HttpClient4Test {
protected String accessToken;
protected String username1 = "username1";
protected String password1 = "password1";
protected String username2 = "username2";
protected String password2 = "password2";
protected String username3 = "username3";
protected String password3 = "password3";
protected String role1 = "role1";
protected String role2 = "role2";
protected String role3 = "role3";
protected Properties properties;
protected String namespace1 = "namespace1";
/**
* Logs in a user with the provided username and password, then returns the access token.
*
* @param username the username of the user
* @param password the password of the user
* @return the access token (accessToken) as a String
* @throws AssertionError if the response status is not 2xx or the accessToken is missing
*/
public String login(String username, String password) {
ResponseEntity<String> response = request("/nacos/v1/auth/users/login",
Params.newParams().appendParam("username", username).appendParam("password", password).done(),
String.class, HttpMethod.POST);
assertTrue(response.getStatusCode().is2xxSuccessful());
JsonNode json = JacksonUtils.toObj(response.getBody());
assertTrue(json.has("accessToken"));
return json.get("accessToken").textValue();
}
public String login() {
return login("nacos", "nacos");
}
protected void init(int port) throws Exception {
AuthConfigs.setCachingEnabled(false);
TimeUnit.SECONDS.sleep(5L);
String url = String.format("http://localhost:%d/", port);
System.setProperty("nacos.core.auth.enabled", "true");
this.base = new URL(url);
accessToken = login();
// Create a user:
ResponseEntity<String> response = request("/nacos/v1/auth/users",
Params.newParams().appendParam("username", username1).appendParam("password", password1)
.appendParam("accessToken", accessToken).done(), String.class, HttpMethod.POST);
System.out.println(response);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Create a user:
response = request("/nacos/v1/auth/users",
Params.newParams().appendParam("username", username2).appendParam("password", password2)
.appendParam("accessToken", accessToken).done(), String.class, HttpMethod.POST);
System.out.println(response);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Create a user:
response = request("/nacos/v1/auth/users",
Params.newParams().appendParam("username", username3).appendParam("password", password3)
.appendParam("accessToken", accessToken).done(), String.class, HttpMethod.POST);
System.out.println(response);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Create a role:
response = request("/nacos/v1/auth/roles",
Params.newParams().appendParam("role", role1).appendParam("username", username1)
.appendParam("accessToken", accessToken).done(), String.class, HttpMethod.POST);
System.out.println(response);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Create a role:
response = request("/nacos/v1/auth/roles",
Params.newParams().appendParam("role", role2).appendParam("username", username2)
.appendParam("accessToken", accessToken).done(), String.class, HttpMethod.POST);
System.out.println(response);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Create a role:
response = request("/nacos/v1/auth/roles",
Params.newParams().appendParam("role", role3).appendParam("username", username3)
.appendParam("accessToken", accessToken).done(), String.class, HttpMethod.POST);
System.out.println(response);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Add read permission of namespace1 to role1:
response = request("/nacos/v1/auth/permissions",
Params.newParams().appendParam("role", role1).appendParam("resource", namespace1 + ":*:*")
.appendParam("action", "r").appendParam("accessToken", accessToken).done(), String.class,
HttpMethod.POST);
System.out.println(response);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Add write permission of namespace1 to role2:
response = request("/nacos/v1/auth/permissions",
Params.newParams().appendParam("role", role2).appendParam("resource", namespace1 + ":*:*")
.appendParam("action", "w").appendParam("accessToken", accessToken).done(), String.class,
HttpMethod.POST);
System.out.println(response);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Add read/write permission of namespace1 to role3:
response = request("/nacos/v1/auth/permissions",
Params.newParams().appendParam("role", role3).appendParam("resource", namespace1 + ":*:*")
.appendParam("action", "rw").appendParam("accessToken", accessToken).done(), String.class,
HttpMethod.POST);
System.out.println(response);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Init properties:
properties = new Properties();
properties.put(PropertyKeyConst.NAMESPACE, namespace1);
properties.put(PropertyKeyConst.SERVER_ADDR, "127.0.0.1" + ":" + port);
}
protected void destroy() {
// Delete permission:
ResponseEntity<String> response = request("/nacos/v1/auth/permissions",
Params.newParams().appendParam("role", role1).appendParam("resource", namespace1 + ":*:*")
.appendParam("action", "r").appendParam("accessToken", accessToken).done(), String.class,
HttpMethod.DELETE);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Delete permission:
response = request("/nacos/v1/auth/permissions",
Params.newParams().appendParam("role", role2).appendParam("resource", namespace1 + ":*:*")
.appendParam("action", "w").appendParam("accessToken", accessToken).done(), String.class,
HttpMethod.DELETE);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Delete permission:
response = request("/nacos/v1/auth/permissions",
Params.newParams().appendParam("role", role3).appendParam("resource", namespace1 + ":*:*")
.appendParam("action", "rw").appendParam("accessToken", accessToken).done(), String.class,
HttpMethod.DELETE);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Delete a role:
response = request("/nacos/v1/auth/roles",
Params.newParams().appendParam("role", role1).appendParam("username", username1)
.appendParam("accessToken", accessToken).done(), String.class, HttpMethod.DELETE);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Delete a role:
response = request("/nacos/v1/auth/roles",
Params.newParams().appendParam("role", role2).appendParam("username", username2)
.appendParam("accessToken", accessToken).done(), String.class, HttpMethod.DELETE);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Delete a role:
response = request("/nacos/v1/auth/roles",
Params.newParams().appendParam("role", role3).appendParam("username", username3)
.appendParam("accessToken", accessToken).done(), String.class, HttpMethod.DELETE);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Delete a user:
response = request("/nacos/v1/auth/users",
Params.newParams().appendParam("username", username1).appendParam("password", password1)
.appendParam("accessToken", accessToken).done(), String.class, HttpMethod.DELETE);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Delete a user:
response = request("/nacos/v1/auth/users",
Params.newParams().appendParam("username", username2).appendParam("password", password2)
.appendParam("accessToken", accessToken).done(), String.class, HttpMethod.DELETE);
assertTrue(response.getStatusCode().is2xxSuccessful());
// Delete a user:
response = request("/nacos/v1/auth/users",
Params.newParams().appendParam("username", username3).appendParam("password", password3)
.appendParam("accessToken", accessToken).done(), String.class, HttpMethod.DELETE);
assertTrue(response.getStatusCode().is2xxSuccessful());
System.setProperty("nacos.core.auth.enabled", "false");
}
}
| AuthBase |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Foldables.java | {
"start": 1102,
"end": 1663
} | class ____ validate the type resolution of expressions before and after logical planning.
* If null is passed for Failures to the constructor, it means we are only type resolution.
* This is usually called when doing pre-logical planning validation.
* If a {@link Failures} instance is passed, it means we are doing post-logical planning validation as well.
* This is usually called after folding is done, during
* {@link org.elasticsearch.xpack.esql.capabilities.PostOptimizationVerificationAware} verification
*/
public static | to |
java | apache__maven | api/maven-api-core/src/test/java/org/apache/maven/api/services/ModelSourceTest.java | {
"start": 1210,
"end": 1249
} | interface ____ its implementations.
*/
| and |
java | spring-projects__spring-boot | test-support/spring-boot-test-support/src/main/java/org/springframework/boot/testsupport/classpath/ModifiedClassPathClassLoader.java | {
"start": 2568,
"end": 2646
} | class ____.
*
* @author Andy Wilkinson
* @author Christoph Dreis
*/
final | path |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rpc/TestingRpcServiceExtension.java | {
"start": 1159,
"end": 2181
} | class ____ implements CustomExtension {
@Nullable private TestingRpcService testingRpcService;
public TestingRpcServiceExtension() {
this.testingRpcService = null;
}
public TestingRpcService getTestingRpcService() {
Preconditions.checkNotNull(testingRpcService);
return testingRpcService;
}
@Override
public void before(ExtensionContext extensionContext) {
if (testingRpcService != null) {
terminateRpcService(testingRpcService);
}
testingRpcService = new TestingRpcService();
}
@Override
public void after(ExtensionContext extensionContext) {
if (testingRpcService != null) {
terminateRpcService(testingRpcService);
testingRpcService = null;
}
}
private void terminateRpcService(TestingRpcService testingRpcService) {
CompletableFuture<Void> terminationFuture = testingRpcService.closeAsync();
terminationFuture.join();
}
}
| TestingRpcServiceExtension |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatErrorTests.java | {
"start": 799,
"end": 1617
} | class ____ extends ErrorsForCasesWithoutExamplesTestCase {
@Override
protected List<TestCaseSupplier> cases() {
List<TestCaseSupplier> suppliers = paramsToSuppliers(ConcatTests.parameters());
// TODO support longer lists. Though this thing has 100s so we probably can't do them all.
suppliers.removeIf(s -> s.types().size() > 3);
return suppliers;
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new Concat(source, args.get(0), args.subList(1, args.size()));
}
@Override
protected Matcher<String> expectedTypeErrorMatcher(List<Set<DataType>> validPerPosition, List<DataType> signature) {
return equalTo(typeErrorMessage(false, validPerPosition, signature, (v, p) -> "string"));
}
}
| ConcatErrorTests |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/validation/ValidatorTests.java | {
"start": 1415,
"end": 1463
} | class ____ extends TestBean {
}
}
| TestBeanSubclass |
java | elastic__elasticsearch | x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchConcurrentStatusIT.java | {
"start": 11493,
"end": 11800
} | class ____ {
final LongAdder totalCalls = new LongAdder();
final LongAdder runningResponses = new LongAdder();
final LongAdder completedResponses = new LongAdder();
final LongAdder exceptions = new LongAdder();
final LongAdder gone410 = new LongAdder();
}
}
| PollStats |
java | spring-projects__spring-boot | module/spring-boot-jersey/src/main/java/org/springframework/boot/jersey/actuate/endpoint/web/JerseyRemainingPathSegmentProvider.java | {
"start": 883,
"end": 1033
} | interface ____ {
String get(ContainerRequestContext requestContext, String matchAllRemainingPathSegmentsVariable);
}
| JerseyRemainingPathSegmentProvider |
java | apache__camel | components/camel-spring-parent/camel-spring-ws/src/test/java/org/apache/camel/component/spring/ws/processor/PrecedenceProcessor.java | {
"start": 1223,
"end": 2543
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) throws Exception {
// same sample data
InputStream is = getClass().getResourceAsStream("/stockquote-response.txt");
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
Document doc = db.parse(is);
exchange.getMessage().setHeader(SpringWebserviceConstants.SPRING_WS_ADDRESSING_ACTION,
new URI("http://actionPrecedence.com"));
exchange.getMessage().setHeader(SpringWebserviceConstants.SPRING_WS_ADDRESSING_PRODUCER_REPLY_TO,
new URI("http://replyPrecedence.to"));
exchange.getMessage().setHeader(SpringWebserviceConstants.SPRING_WS_ADDRESSING_PRODUCER_FAULT_TO,
new URI("http://faultPrecedence.to"));
exchange.getMessage().setHeader(SpringWebserviceConstants.SPRING_WS_ADDRESSING_CONSUMER_OUTPUT_ACTION,
new URI("http://outputHeader.com"));
exchange.getMessage().setHeader(SpringWebserviceConstants.SPRING_WS_ADDRESSING_CONSUMER_FAULT_ACTION,
new URI("http://faultHeader.com"));
exchange.getMessage().copyFrom(exchange.getIn());
exchange.getMessage().setBody(doc);
}
}
| PrecedenceProcessor |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/util/OnCommittedResponseWrapper.java | {
"start": 7680,
"end": 12580
} | class ____ extends PrintWriter {
private final PrintWriter delegate;
SaveContextPrintWriter(PrintWriter delegate) {
super(delegate);
this.delegate = delegate;
}
@Override
public void flush() {
doOnResponseCommitted();
this.delegate.flush();
}
@Override
public void close() {
doOnResponseCommitted();
this.delegate.close();
}
@Override
public boolean equals(Object obj) {
return this.delegate.equals(obj);
}
@Override
public int hashCode() {
return this.delegate.hashCode();
}
@Override
public String toString() {
return getClass().getName() + "[delegate=" + this.delegate.toString() + "]";
}
@Override
public boolean checkError() {
return this.delegate.checkError();
}
@Override
public void write(int c) {
trackContentLength(c);
this.delegate.write(c);
}
@Override
public void write(char[] buf, int off, int len) {
checkContentLength(len);
this.delegate.write(buf, off, len);
}
@Override
public void write(char[] buf) {
trackContentLength(buf);
this.delegate.write(buf);
}
@Override
public void write(String s, int off, int len) {
checkContentLength(len);
this.delegate.write(s, off, len);
}
@Override
public void write(String s) {
trackContentLength(s);
this.delegate.write(s);
}
@Override
public void print(boolean b) {
trackContentLength(b);
this.delegate.print(b);
}
@Override
public void print(char c) {
trackContentLength(c);
this.delegate.print(c);
}
@Override
public void print(int i) {
trackContentLength(i);
this.delegate.print(i);
}
@Override
public void print(long l) {
trackContentLength(l);
this.delegate.print(l);
}
@Override
public void print(float f) {
trackContentLength(f);
this.delegate.print(f);
}
@Override
public void print(double d) {
trackContentLength(d);
this.delegate.print(d);
}
@Override
public void print(char[] s) {
trackContentLength(s);
this.delegate.print(s);
}
@Override
public void print(String s) {
trackContentLength(s);
this.delegate.print(s);
}
@Override
public void print(Object obj) {
trackContentLength(obj);
this.delegate.print(obj);
}
@Override
public void println() {
trackContentLengthLn();
this.delegate.println();
}
@Override
public void println(boolean x) {
trackContentLength(x);
trackContentLengthLn();
this.delegate.println(x);
}
@Override
public void println(char x) {
trackContentLength(x);
trackContentLengthLn();
this.delegate.println(x);
}
@Override
public void println(int x) {
trackContentLength(x);
trackContentLengthLn();
this.delegate.println(x);
}
@Override
public void println(long x) {
trackContentLength(x);
trackContentLengthLn();
this.delegate.println(x);
}
@Override
public void println(float x) {
trackContentLength(x);
trackContentLengthLn();
this.delegate.println(x);
}
@Override
public void println(double x) {
trackContentLength(x);
trackContentLengthLn();
this.delegate.println(x);
}
@Override
public void println(char[] x) {
trackContentLength(x);
trackContentLengthLn();
this.delegate.println(x);
}
@Override
public void println(String x) {
trackContentLength(x);
trackContentLengthLn();
this.delegate.println(x);
}
@Override
public void println(Object x) {
trackContentLength(x);
trackContentLengthLn();
this.delegate.println(x);
}
@Override
public PrintWriter printf(String format, Object... args) {
return this.delegate.printf(format, args);
}
@Override
public PrintWriter printf(Locale l, String format, Object... args) {
return this.delegate.printf(l, format, args);
}
@Override
public PrintWriter format(String format, Object... args) {
return this.delegate.format(format, args);
}
@Override
public PrintWriter format(Locale l, String format, Object... args) {
return this.delegate.format(l, format, args);
}
@Override
public PrintWriter append(CharSequence csq) {
checkContentLength(csq.length());
return this.delegate.append(csq);
}
@Override
public PrintWriter append(CharSequence csq, int start, int end) {
checkContentLength(end - start);
return this.delegate.append(csq, start, end);
}
@Override
public PrintWriter append(char c) {
trackContentLength(c);
return this.delegate.append(c);
}
}
/**
* Ensures{@link OnCommittedResponseWrapper#onResponseCommitted()} is invoked before
* calling methods that commit the response. We delegate all methods to the original
* {@link jakarta.servlet.ServletOutputStream} to ensure that the behavior is as close
* to the original {@link jakarta.servlet.ServletOutputStream} as possible. See
* SEC-2039
*
* @author Rob Winch
*/
private | SaveContextPrintWriter |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/odps/ast/OdpsAddTableStatement.java | {
"start": 1152,
"end": 2883
} | class ____ extends OdpsStatementImpl implements SQLAlterStatement {
private SQLExprTableSource table;
private final List<SQLAssignItem> partitions = new ArrayList<SQLAssignItem>();
protected SQLExpr comment;
protected boolean force;
protected final List<SQLPrivilegeItem> privileges = new ArrayList<SQLPrivilegeItem>();
protected SQLName toPackage;
public OdpsAddTableStatement() {
super.dbType = DbType.odps;
}
@Override
protected void accept0(OdpsASTVisitor visitor) {
if (visitor.visit(this)) {
this.acceptChild(visitor, table);
}
visitor.endVisit(this);
}
public SQLExprTableSource getTable() {
return table;
}
public void setTable(SQLExprTableSource table) {
if (table != null) {
table.setParent(table);
}
this.table = table;
}
public void setTable(SQLName table) {
this.setTable(new SQLExprTableSource(table));
}
public SQLExpr getComment() {
return comment;
}
public void setComment(SQLExpr x) {
if (x != null) {
x.setParent(this);
}
this.comment = x;
}
public boolean isForce() {
return force;
}
public void setForce(boolean force) {
this.force = force;
}
public List<SQLAssignItem> getPartitions() {
return partitions;
}
public SQLName getToPackage() {
return toPackage;
}
public void setToPackage(SQLName x) {
if (x != null) {
x.setParent(this);
}
this.toPackage = x;
}
public List<SQLPrivilegeItem> getPrivileges() {
return privileges;
}
}
| OdpsAddTableStatement |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java | {
"start": 2381,
"end": 2590
} | class ____ encapsulates checking storage locations during DataNode
* startup.
*
* Some of this code was extracted from the DataNode class.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public | that |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/task/SimpleAsyncTaskExecutorCustomizer.java | {
"start": 745,
"end": 966
} | interface ____ can be used to customize a {@link SimpleAsyncTaskExecutor}.
*
* @author Stephane Nicoll
* @author Moritz Halbritter
* @since 3.2.0
* @see SimpleAsyncTaskExecutorBuilder
*/
@FunctionalInterface
public | that |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/samples/standalone/resultmatchers/XmlContentAssertionTests.java | {
"start": 3435,
"end": 3998
} | class ____ {
@RequestMapping(value="/music/people")
@ResponseBody
public PeopleWrapper getPeople() {
List<Person> composers = Arrays.asList(
new Person("Johann Sebastian Bach").setSomeDouble(21),
new Person("Johannes Brahms").setSomeDouble(.0025),
new Person("Edvard Grieg").setSomeDouble(1.6035),
new Person("Robert Schumann").setSomeDouble(Double.NaN));
return new PeopleWrapper(composers);
}
}
@SuppressWarnings("unused")
@XmlRootElement(name="people")
@XmlAccessorType(XmlAccessType.FIELD)
private static | MusicController |
java | apache__camel | components/camel-bean/src/main/java/org/apache/camel/component/bean/MethodInfo.java | {
"start": 29960,
"end": 30497
} | class ____)
valid = BeanHelper.isValidParameterValue(exp);
if (!valid && !varargs) {
// it may be a parameter type instead, and if so, then we should return null,
// as this method is only for evaluating parameter values
Boolean isClass = BeanHelper.isAssignableToExpectedType(exchange.getContext().getClassResolver(), exp,
parameterType);
// the method will return a non-null value if exp is a | syntax |
java | apache__thrift | lib/java/src/main/java/org/apache/thrift/TNonblockingMultiFetchStats.java | {
"start": 916,
"end": 3600
} | class ____ {
private int numTotalServers;
private int numReadCompletedServers;
private int numConnectErrorServers;
private int totalRecvBufBytes;
private int maxResponseBytes;
private int numOverflowedRecvBuf;
private int numInvalidFrameSize;
// time from the beginning of fetch() function to the reading finish
// time of the last socket (in millisecond)
private long readTime;
public TNonblockingMultiFetchStats() {
clear();
}
public void clear() {
numTotalServers = 0;
numReadCompletedServers = 0;
numConnectErrorServers = 0;
totalRecvBufBytes = 0;
maxResponseBytes = 0;
numOverflowedRecvBuf = 0;
numInvalidFrameSize = 0;
readTime = 0;
}
public String toString() {
String stats =
String.format(
"numTotalServers=%d, "
+ "numReadCompletedServers=%d, numConnectErrorServers=%d, "
+ "numUnresponsiveServers=%d, totalRecvBufBytes=%fM, "
+ "maxResponseBytes=%d, numOverflowedRecvBuf=%d, "
+ "numInvalidFrameSize=%d, readTime=%dms",
numTotalServers,
numReadCompletedServers,
numConnectErrorServers,
(numTotalServers - numReadCompletedServers - numConnectErrorServers),
totalRecvBufBytes / 1024.0 / 1024,
maxResponseBytes,
numOverflowedRecvBuf,
numInvalidFrameSize,
readTime);
return stats;
}
public void setNumTotalServers(int val) {
numTotalServers = val;
}
public void setMaxResponseBytes(int val) {
maxResponseBytes = val;
}
public void setReadTime(long val) {
readTime = val;
}
public void incNumReadCompletedServers() {
numReadCompletedServers++;
}
public void incNumConnectErrorServers() {
numConnectErrorServers++;
}
public void incNumOverflowedRecvBuf() {
numOverflowedRecvBuf++;
}
public void incTotalRecvBufBytes(int val) {
totalRecvBufBytes += val;
}
public void incNumInvalidFrameSize() {
numInvalidFrameSize++;
}
public int getMaxResponseBytes() {
return maxResponseBytes;
}
public int getNumReadCompletedServers() {
return numReadCompletedServers;
}
public int getNumConnectErrorServers() {
return numConnectErrorServers;
}
public int getNumTotalServers() {
return numTotalServers;
}
public int getNumOverflowedRecvBuf() {
return numOverflowedRecvBuf;
}
public int getTotalRecvBufBytes() {
return totalRecvBufBytes;
}
public int getNumInvalidFrameSize() {
return numInvalidFrameSize;
}
public long getReadTime() {
return readTime;
}
}
| TNonblockingMultiFetchStats |
java | elastic__elasticsearch | x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java | {
"start": 15177,
"end": 15991
} | class ____ implements AggValueExtractor {
@Override
public Object value(Aggregation agg, Map<String, String> fieldTypeMap, String lookupFieldPrefix) {
MultiValueAggregation aggregation = (MultiValueAggregation) agg;
Map<String, Object> extracted = new LinkedHashMap<>();
for (String valueName : aggregation.valueNames()) {
List<String> valueAsStrings = aggregation.getValuesAsStrings(valueName);
// todo: size > 1 is not supported, requires a refactoring so that `size()` is exposed in the agg builder
if (valueAsStrings.size() > 0) {
extracted.put(valueName, valueAsStrings.get(0));
}
}
return extracted;
}
}
static | MultiValueAggExtractor |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java | {
"start": 1833,
"end": 2918
} | class ____ implements Writeable {
private final Example e1;
private final Example e2;
NamedHolder(Example e) {
this.e1 = e;
this.e2 = e;
}
NamedHolder(StreamInput in) throws IOException {
e1 = ((DelayableWriteable.Deduplicator) in).deduplicate(in.readNamedWriteable(Example.class));
e2 = ((DelayableWriteable.Deduplicator) in).deduplicate(in.readNamedWriteable(Example.class));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeNamedWriteable(e1);
out.writeNamedWriteable(e2);
}
@Override
public boolean equals(Object obj) {
if (obj == null || getClass() != obj.getClass()) {
return false;
}
NamedHolder other = (NamedHolder) obj;
return e1.equals(other.e1) && e2.equals(other.e2);
}
@Override
public int hashCode() {
return Objects.hash(e1, e2);
}
}
private static | NamedHolder |
java | alibaba__nacos | maintainer-client/src/main/java/com/alibaba/nacos/maintainer/client/ai/McpMaintainerService.java | {
"start": 1447,
"end": 25314
} | interface ____ {
/**
* List first 100 Mcp Servers in Nacos.
*
* @return Fist 100 mcp server list.
* @throws NacosException if fail to list mcp server
*/
default Page<McpServerBasicInfo> listMcpServer() throws NacosException {
return listMcpServer(1, 100);
}
/**
* List Mcp Servers in Nacos with page.
*
* @param pageNo the page number of mcp Servers
* @param pageSize the size of each page
* @return paged mcp Server list
* @throws NacosException if fail to list mcp server
*/
default Page<McpServerBasicInfo> listMcpServer(int pageNo, int pageSize) throws NacosException {
return listMcpServer(StringUtils.EMPTY, pageNo, pageSize);
}
/**
* List Mcp Servers in Nacos with page.
*
* @param mcpName mcpName pattern, if empty string or null, will list all Mcp Servers.
* @param pageNo the page number of mcp Servers
* @param pageSize the size of each page
* @return paged mcp Server list
* @throws NacosException if fail to list mcp server
*/
default Page<McpServerBasicInfo> listMcpServer(String mcpName, int pageNo, int pageSize) throws NacosException {
return listMcpServer(Constants.DEFAULT_NAMESPACE_ID, mcpName, pageNo, pageSize);
}
/**
* List Mcp Servers in Nacos with page.
*
* @param namespaceId namespaceId
* @param mcpName mcpName pattern, if empty string or null, will list all Mcp Servers.
* @param pageNo the page number of mcp Servers
* @param pageSize the size of each page
* @return paged mcp Server list
* @throws NacosException if fail to list mcp server
*/
Page<McpServerBasicInfo> listMcpServer(String namespaceId, String mcpName, int pageNo, int pageSize) throws NacosException;
/**
* Blur search first 100 Mcp Servers in Nacos with mcp name pattern.
*
* @param mcpName mcpName pattern, if empty string or null, will list all Mcp Servers.
* @return First 100 mcp server list matched input mcpName pattern.
* @throws NacosException if fail to search mcp server
*/
default Page<McpServerBasicInfo> searchMcpServer(String mcpName) throws NacosException {
return searchMcpServer(mcpName, 1, 100);
}
/**
* Blur search first 100 Mcp Servers in Nacos with mcp name pattern.
*
* @param mcpName mcpName pattern, if empty string or null, will list all Mcp Servers.
* @param pageNo the page number of mcp Servers
* @param pageSize the size of each page
* @return paged mcp Server list matched input mcpName pattern.
* @throws NacosException if fail to search mcp server
*/
default Page<McpServerBasicInfo> searchMcpServer(String mcpName, int pageNo, int pageSize) throws NacosException {
return searchMcpServer(Constants.DEFAULT_NAMESPACE_ID, mcpName, pageNo, pageSize);
}
/**
* Blur search first 100 Mcp Servers in Nacos with mcp name pattern.
*
* @param namespaceId namespaceId
* @param mcpName mcpName pattern, if empty string or null, will list all Mcp Servers.
* @param pageNo the page number of mcp Servers
* @param pageSize the size of each page
* @return paged mcp Server list matched input mcpName pattern.
* @throws NacosException if fail to search mcp server
*/
Page<McpServerBasicInfo> searchMcpServer(String namespaceId, String mcpName, int pageNo, int pageSize) throws NacosException;
/**
* Get mcp server detail information from Nacos.
*
* @param mcpName the mcp server name
* @return detail information for this mcp server
* @throws NacosException if fail to get mcp server
*/
default McpServerDetailInfo getMcpServerDetail(String mcpName) throws NacosException {
return getMcpServerDetail(mcpName, null);
}
/**
* Get mcp server detail information from Nacos.
*
* @param mcpName the mcp server name
* @param version the mcp server version
* @return detail information for this mcp server
* @throws NacosException if fail to get mcp server
*/
default McpServerDetailInfo getMcpServerDetail(String mcpName, String version) throws NacosException {
return getMcpServerDetail(Constants.DEFAULT_NAMESPACE_ID, mcpName, null, version);
}
/**
* Gets mcp server detail.
*
* @param namespaceId the namespace id
* @param mcpName the mcp name
* @param version the version
* @return the mcp server detail
* @throws NacosException the nacos exception
*/
default McpServerDetailInfo getMcpServerDetail(String namespaceId, String mcpName, String version) throws NacosException {
return getMcpServerDetail(namespaceId, mcpName, null, version);
}
/**
* Get mcp server detail information from Nacos.
*
* @param namespaceId namespaceId
* @param mcpName the mcp server name
* @param mcpId the mcp server id
* @param version the mcp server version
* @return detail information for this mcp server
* @throws NacosException if fail to get mcp server
*/
McpServerDetailInfo getMcpServerDetail(String namespaceId, String mcpName, String mcpId, String version)
throws NacosException;
/**
* Create new local mcp server to Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @param version version of the new mcp server
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
default String createLocalMcpServer(String mcpName, String version) throws NacosException {
return createLocalMcpServer(mcpName, version, null);
}
/**
* Create new local mcp server to Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @param version version of the new mcp server
* @param description description of the new mcp server
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
default String createLocalMcpServer(String mcpName, String version, String description) throws NacosException {
return createLocalMcpServer(mcpName, version, description, null);
}
/**
* Create new local mcp server to Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @param version version of the new mcp server
* @param description description of the new mcp server
* @param toolSpec mcp server tools specification, see {@link McpToolSpecification}, nullable.
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
default String createLocalMcpServer(String mcpName, String version, String description,
McpToolSpecification toolSpec) throws NacosException {
return createLocalMcpServer(mcpName, version, description, null, toolSpec);
}
/**
* Create new local mcp server to Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @param version version of the new mcp server
* @param description description of the new mcp server
* @param localServerConfig custom config of the new mcp server
* @param toolSpec mcp server tools specification, see {@link McpToolSpecification}, nullable.
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
default String createLocalMcpServer(String mcpName, String version, String description,
Map<String, Object> localServerConfig, McpToolSpecification toolSpec) throws NacosException {
McpServerBasicInfo serverSpec = new McpServerBasicInfo();
serverSpec.setName(mcpName);
serverSpec.setProtocol(AiConstants.Mcp.MCP_PROTOCOL_STDIO);
ServerVersionDetail versionDetail = new ServerVersionDetail();
versionDetail.setVersion(version);
serverSpec.setVersionDetail(versionDetail);
serverSpec.setDescription(description);
serverSpec.setLocalServerConfig(localServerConfig);
return createLocalMcpServer(mcpName, serverSpec, toolSpec);
}
/**
* Create new local mcp server to Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @param serverSpec mcp server specification, see {@link McpServerBasicInfo} which `type` is
* {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @param toolSpec mcp server tools specification, see {@link McpToolSpecification}, nullable.
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
default String createLocalMcpServer(String mcpName, McpServerBasicInfo serverSpec, McpToolSpecification toolSpec)
throws NacosException {
if (Objects.isNull(serverSpec)) {
throw new NacosException(NacosException.INVALID_PARAM, "Mcp server specification cannot be null.");
}
if (!AiConstants.Mcp.MCP_PROTOCOL_STDIO.equalsIgnoreCase(serverSpec.getProtocol())) {
throw new NacosException(NacosException.INVALID_PARAM,
String.format("Mcp server type must be `local`, input is `%s`", serverSpec.getProtocol()));
}
return createMcpServer(mcpName, serverSpec, toolSpec, null);
}
/**
* Create new remote mcp server to Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @param version version of the new mcp server
* @param protocol mcp protocol type not {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @param endpointSpec mcp server endpoint specification, see {@link McpEndpointSpec}, can't be null.
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
default String createRemoteMcpServer(String mcpName, String version, String protocol,
McpEndpointSpec endpointSpec) throws NacosException {
return createRemoteMcpServer(mcpName, version, protocol, new McpServerRemoteServiceConfig(), endpointSpec);
}
/**
* Create new remote mcp server to Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @param version version of the new mcp server
* @param protocol mcp protocol type not {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @param remoteServiceConfig remote service configuration, see {@link McpServerRemoteServiceConfig}.
* @param endpointSpec mcp server endpoint specification, see {@link McpEndpointSpec}, can't be null.
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
default String createRemoteMcpServer(String mcpName, String version, String protocol,
McpServerRemoteServiceConfig remoteServiceConfig, McpEndpointSpec endpointSpec) throws NacosException {
return createRemoteMcpServer(mcpName, version, null, protocol, remoteServiceConfig, endpointSpec);
}
/**
* Create new remote mcp server to Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @param version version of the new mcp server
* @param description description of the new mcp server
* @param protocol mcp protocol type not {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @param remoteServiceConfig remote service configuration, see {@link McpServerRemoteServiceConfig}.
* @param endpointSpec mcp server endpoint specification, see {@link McpEndpointSpec}, can't be null.
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
default String createRemoteMcpServer(String mcpName, String version, String description, String protocol,
McpServerRemoteServiceConfig remoteServiceConfig, McpEndpointSpec endpointSpec) throws NacosException {
return createRemoteMcpServer(mcpName, version, description, protocol, remoteServiceConfig, endpointSpec, null);
}
/**
* Create new remote mcp server to Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @param version version of the new mcp server
* @param description description of the new mcp server
* @param protocol mcp protocol type not {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @param remoteServiceConfig remote service configuration, see {@link McpServerRemoteServiceConfig}.
* @param endpointSpec mcp server endpoint specification, see {@link McpEndpointSpec}, can't be null.
* @param toolSpec mcp server tools specification, see {@link McpToolSpecification}, nullable.
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
default String createRemoteMcpServer(String mcpName, String version, String description, String protocol,
McpServerRemoteServiceConfig remoteServiceConfig, McpEndpointSpec endpointSpec, McpToolSpecification toolSpec)
throws NacosException {
McpServerBasicInfo serverSpec = new McpServerBasicInfo();
serverSpec.setName(mcpName);
serverSpec.setProtocol(protocol);
ServerVersionDetail detail = new ServerVersionDetail();
detail.setVersion(version);
serverSpec.setVersionDetail(detail);
serverSpec.setDescription(description);
serverSpec.setRemoteServerConfig(remoteServiceConfig);
return createRemoteMcpServer(mcpName, serverSpec, toolSpec, endpointSpec);
}
/**
* Create new remote mcp server to Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @param serverSpec mcp server specification, see {@link McpServerBasicInfo} which `type` is not
* {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @param endpointSpec mcp server endpoint specification, see {@link McpEndpointSpec}, can't be null.
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
default String createRemoteMcpServer(String mcpName, McpServerBasicInfo serverSpec, McpEndpointSpec endpointSpec)
throws NacosException {
return createRemoteMcpServer(mcpName, serverSpec, null, endpointSpec);
}
/**
* Create new remote mcp server to Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @param serverSpec mcp server specification, see {@link McpServerBasicInfo} which `type` is not
* {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @param toolSpec mcp server tools specification, see {@link McpToolSpecification}, nullable.
* @param endpointSpec mcp server endpoint specification, see {@link McpEndpointSpec}, nullable.
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
default String createRemoteMcpServer(String mcpName, McpServerBasicInfo serverSpec, McpToolSpecification toolSpec,
McpEndpointSpec endpointSpec) throws NacosException {
if (Objects.isNull(serverSpec)) {
throw new NacosException(NacosException.INVALID_PARAM, "Mcp server specification cannot be null.");
}
if (AiConstants.Mcp.MCP_PROTOCOL_STDIO.equalsIgnoreCase(serverSpec.getProtocol())) {
throw new NacosException(NacosException.INVALID_PARAM, "Mcp server type cannot be `local` or empty.");
}
if (Objects.isNull(endpointSpec)) {
throw new NacosException(NacosException.INVALID_PARAM, "Mcp server endpoint specification cannot be null.");
}
return createMcpServer(mcpName, serverSpec, toolSpec, endpointSpec);
}
/**
* Create new mcp server to Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @param serverSpec mcp server specification, see {@link McpServerBasicInfo}
* @param toolSpec mcp server tools specification, see {@link McpToolSpecification}, nullable.
* @param endpointSpec mcp server endpoint specification, see {@link McpEndpointSpec}, nullable if `type` is
* {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
default String createMcpServer(String mcpName, McpServerBasicInfo serverSpec, McpToolSpecification toolSpec,
McpEndpointSpec endpointSpec) throws NacosException {
return createMcpServer(Constants.DEFAULT_NAMESPACE_ID, mcpName, serverSpec, toolSpec, endpointSpec);
}
/**
* Create new mcp server to Nacos.
*
* @param namespaceId namespaceId
* @param mcpName mcp server name of the new mcp server
* @param serverSpec mcp server specification, see {@link McpServerBasicInfo}
* @param toolSpec mcp server tools specification, see {@link McpToolSpecification}, nullable.
* @param endpointSpec mcp server endpoint specification, see {@link McpEndpointSpec}, nullable if `type` is
* {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @return mcp server id of the new mcp server
* @throws NacosException if fail to create mcp server.
*/
String createMcpServer(String namespaceId, String mcpName, McpServerBasicInfo serverSpec, McpToolSpecification toolSpec,
McpEndpointSpec endpointSpec) throws NacosException;
/**
* Update existed mcp server to Nacos Default namespace.
* <p>
* Please Query Full information by {@link #getMcpServerDetail(String)} and input Full information to this method.
* This method will full cover update the old information.
* </p>
*
* @param mcpName mcp server name of the new mcp server
* @param serverSpec mcp server specification, see {@link McpServerBasicInfo}
* @param toolSpec mcp server tools specification, see {@link McpToolSpecification}, nullable.
* @param endpointSpec mcp server endpoint specification, see {@link McpEndpointSpec}, nullable if `type` is
* {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @return {@code true} if create success, {@code false} otherwise
* @throws NacosException if fail to create mcp server.
*/
default boolean updateMcpServer(String mcpName, McpServerBasicInfo serverSpec, McpToolSpecification toolSpec,
McpEndpointSpec endpointSpec) throws NacosException {
return updateMcpServer(mcpName, true, serverSpec, toolSpec, endpointSpec);
}
/**
* Update existed mcp server to Nacos Default namespace.
* <p>
* Please Query Full information by {@link #getMcpServerDetail(String)} and input Full information to this method.
* This method will full cover update the old information.
* </p>
*
* @param mcpName mcp server name of the new mcp server
* @param isLatest publish current version to latest
* @param serverSpec mcp server specification, see {@link McpServerBasicInfo}
* @param toolSpec mcp server tools specification, see {@link McpToolSpecification}, nullable.
* @param endpointSpec mcp server endpoint specification, see {@link McpEndpointSpec}, nullable if `type` is
* {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @return {@code true} if create success, {@code false} otherwise
* @throws NacosException if fail to create mcp server.
*/
default boolean updateMcpServer(String mcpName, boolean isLatest, McpServerBasicInfo serverSpec, McpToolSpecification toolSpec,
McpEndpointSpec endpointSpec) throws NacosException {
return updateMcpServer(Constants.DEFAULT_NAMESPACE_ID, mcpName, isLatest, serverSpec, toolSpec, endpointSpec);
}
/**
* Update existed mcp server to Nacos.
* <p>
* Please Query Full information by {@link #getMcpServerDetail(String)} and input Full information to this method.
* This method will full cover update the old information.
* </p>
*
* @param namespaceId namespaceId
* @param mcpName mcp server name of the new mcp server
* @param isLatest publish current version to latest
* @param serverSpec mcp server specification, see {@link McpServerBasicInfo}
* @param toolSpec mcp server tools specification, see {@link McpToolSpecification}, nullable.
* @param endpointSpec mcp server endpoint specification, see {@link McpEndpointSpec}, nullable if `type` is
* {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @return {@code true} if create success, {@code false} otherwise
* @throws NacosException if fail to create mcp server.
*/
default boolean updateMcpServer(String namespaceId, String mcpName, boolean isLatest, McpServerBasicInfo serverSpec,
McpToolSpecification toolSpec, McpEndpointSpec endpointSpec) throws NacosException {
return updateMcpServer(namespaceId, mcpName, isLatest, serverSpec, toolSpec, endpointSpec, false);
}
/**
* Update existed mcp server to Nacos.
* <p>
* Please Query Full information by {@link #getMcpServerDetail(String)} and input Full information to this method.
* This method will full cover update the old information.
* </p>
*
* @param namespaceId namespaceId
* @param mcpName mcp server name of the new mcp server
* @param isLatest publish current version to latest
* @param serverSpec mcp server specification, see {@link McpServerBasicInfo}
* @param toolSpec mcp server tools specification, see {@link McpToolSpecification}, nullable.
* @param endpointSpec mcp server endpoint specification, see {@link McpEndpointSpec}, nullable if `type` is
* {@link AiConstants.Mcp#MCP_PROTOCOL_STDIO}.
* @param overrideExisting if replace all the instances when update the mcp server
* @return {@code true} if create success, {@code false} otherwise
* @throws NacosException if fail to create mcp server.
*/
boolean updateMcpServer(String namespaceId, String mcpName, boolean isLatest, McpServerBasicInfo serverSpec, McpToolSpecification toolSpec,
McpEndpointSpec endpointSpec, boolean overrideExisting) throws NacosException;
/**
* Delete existed mcp server from Nacos.
*
* @param mcpName mcp server name of the new mcp server
* @return {@code true} if delete success, {@code false} otherwise
* @throws NacosException if fail to delete mcp server.
*/
default boolean deleteMcpServer(String mcpName) throws NacosException {
return deleteMcpServer(Constants.DEFAULT_NAMESPACE_ID, mcpName, null, null);
}
/**
* Delete existed mcp server from Nacos.
*
* @param namespaceId namespaceId
* @param mcpName mcp server name of the new mcp server
* @param mcpId mcp server id of the new mcp server
* @param version mcp version of the new mcp server
* @return {@code true} if delete success, {@code false} otherwise
* @throws NacosException if fail to delete mcp server.
*/
boolean deleteMcpServer(String namespaceId, String mcpName, String mcpId, String version) throws NacosException;
}
| McpMaintainerService |
java | apache__camel | components/camel-barcode/src/test/java/org/apache/camel/dataformat/barcode/BarcodeDataFormatTest.java | {
"start": 5498,
"end": 6066
} | class ____.
*/
@Test
final void testAddToHintMapEncodeHintTypeObject() throws IOException {
EncodeHintType hintType = EncodeHintType.MARGIN;
Object value = 10;
try (BarcodeDataFormat instance = new BarcodeDataFormat()) {
instance.start();
instance.addToHintMap(hintType, value);
assertTrue(instance.getWriterHintMap().containsKey(hintType));
assertEquals(instance.getWriterHintMap().get(hintType), value);
}
}
/**
* Test of addToHintMap method, of | BarcodeDataFormat |
java | spring-projects__spring-boot | documentation/spring-boot-actuator-docs/src/test/java/org/springframework/boot/actuate/docs/web/mappings/MappingsEndpointReactiveDocumentationTests.java | {
"start": 3278,
"end": 5938
} | class ____ extends AbstractEndpointDocumentationTests {
@LocalServerPort
private int port;
private WebTestClient client;
@BeforeEach
void webTestClient(RestDocumentationContextProvider restDocumentation) {
this.client = WebTestClient.bindToServer()
.filter(documentationConfiguration(restDocumentation).snippets().withDefaults())
.baseUrl("http://localhost:" + this.port)
.responseTimeout(Duration.ofMinutes(5))
.build();
}
@Test
void mappings() {
List<FieldDescriptor> requestMappingConditions = List.of(
requestMappingConditionField("").description("Details of the request mapping conditions.").optional(),
requestMappingConditionField(".consumes").description("Details of the consumes condition"),
requestMappingConditionField(".consumes.[].mediaType").description("Consumed media type."),
requestMappingConditionField(".consumes.[].negated").description("Whether the media type is negated."),
requestMappingConditionField(".headers").description("Details of the headers condition."),
requestMappingConditionField(".headers.[].name").description("Name of the header."),
requestMappingConditionField(".headers.[].value").description("Required value of the header, if any."),
requestMappingConditionField(".headers.[].negated").description("Whether the value is negated."),
requestMappingConditionField(".methods").description("HTTP methods that are handled."),
requestMappingConditionField(".params").description("Details of the params condition."),
requestMappingConditionField(".params.[].name").description("Name of the parameter."),
requestMappingConditionField(".params.[].value")
.description("Required value of the parameter, if any."),
requestMappingConditionField(".params.[].negated").description("Whether the value is negated."),
requestMappingConditionField(".patterns")
.description("Patterns identifying the paths handled by the mapping."),
requestMappingConditionField(".produces").description("Details of the produces condition."),
requestMappingConditionField(".produces.[].mediaType").description("Produced media type."),
requestMappingConditionField(".produces.[].negated").description("Whether the media type is negated."));
List<FieldDescriptor> handlerMethod = List.of(
fieldWithPath("*.[].details.handlerMethod").optional()
.type(JsonFieldType.OBJECT)
.description("Details of the method, if any, that will handle requests to this mapping."),
fieldWithPath("*.[].details.handlerMethod.className").type(JsonFieldType.STRING)
.description("Fully qualified name of the | MappingsEndpointReactiveDocumentationTests |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/creation/bytebuddy/codegen/InjectionBase.java | {
"start": 502,
"end": 614
} | class ____ {
private InjectionBase() {
throw new UnsupportedOperationException();
}
}
| InjectionBase |
java | spring-projects__spring-boot | module/spring-boot-data-commons/src/main/java/org/springframework/boot/data/autoconfigure/web/DataWebProperties.java | {
"start": 1308,
"end": 3792
} | class ____ {
/**
* Page index parameter name.
*/
private String pageParameter = "page";
/**
* Page size parameter name.
*/
private String sizeParameter = "size";
/**
* Whether to expose and assume 1-based page number indexes. Defaults to "false",
* meaning a page number of 0 in the request equals the first page.
*/
private boolean oneIndexedParameters;
/**
* General prefix to be prepended to the page number and page size parameters.
*/
private String prefix = "";
/**
* Delimiter to be used between the qualifier and the actual page number and size
* properties.
*/
private String qualifierDelimiter = "_";
/**
* Default page size.
*/
private int defaultPageSize = 20;
/**
* Maximum page size to be accepted.
*/
private int maxPageSize = 2000;
/**
* Configures how to render Spring Data Pageable instances.
*/
private PageSerializationMode serializationMode = PageSerializationMode.DIRECT;
public String getPageParameter() {
return this.pageParameter;
}
public void setPageParameter(String pageParameter) {
this.pageParameter = pageParameter;
}
public String getSizeParameter() {
return this.sizeParameter;
}
public void setSizeParameter(String sizeParameter) {
this.sizeParameter = sizeParameter;
}
public boolean isOneIndexedParameters() {
return this.oneIndexedParameters;
}
public void setOneIndexedParameters(boolean oneIndexedParameters) {
this.oneIndexedParameters = oneIndexedParameters;
}
public String getPrefix() {
return this.prefix;
}
public void setPrefix(String prefix) {
this.prefix = prefix;
}
public String getQualifierDelimiter() {
return this.qualifierDelimiter;
}
public void setQualifierDelimiter(String qualifierDelimiter) {
this.qualifierDelimiter = qualifierDelimiter;
}
public int getDefaultPageSize() {
return this.defaultPageSize;
}
public void setDefaultPageSize(int defaultPageSize) {
this.defaultPageSize = defaultPageSize;
}
public int getMaxPageSize() {
return this.maxPageSize;
}
public void setMaxPageSize(int maxPageSize) {
this.maxPageSize = maxPageSize;
}
public PageSerializationMode getSerializationMode() {
return this.serializationMode;
}
public void setSerializationMode(PageSerializationMode serializationMode) {
this.serializationMode = serializationMode;
}
}
/**
* Sort properties.
*/
public static | Pageable |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/bytearrays/ByteArrays_assertDoesNotContain_with_Integer_Arguments_Test.java | {
"start": 1855,
"end": 6565
} | class ____ extends ByteArraysBaseTest {
@Test
void should_pass_if_actual_does_not_contain_given_values() {
arrays.assertDoesNotContain(someInfo(), actual, IntArrays.arrayOf(12));
}
@Test
void should_pass_if_actual_does_not_contain_given_values_even_if_duplicated() {
arrays.assertDoesNotContain(someInfo(), actual, IntArrays.arrayOf(12, 12, 20));
}
@Test
void should_throw_error_if_array_of_values_to_look_for_is_empty() {
assertThatIllegalArgumentException().isThrownBy(() -> arrays.assertDoesNotContain(someInfo(), actual,
IntArrays.emptyArray()))
.withMessage(valuesToLookForIsEmpty());
}
@Test
void should_throw_error_if_array_of_values_to_look_for_is_null() {
assertThatNullPointerException().isThrownBy(() -> arrays.assertDoesNotContain(someInfo(), actual, (int[]) null))
.withMessage(valuesToLookForIsNull());
}
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertDoesNotContain(someInfo(), null,
IntArrays.arrayOf(8)))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_contains_given_values() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> arrays.assertDoesNotContain(info, actual, IntArrays.arrayOf(6, 8, 20)));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldNotContain(actual, arrayOf(6, 8, 20), newLinkedHashSet((byte) 6, (byte) 8)));
}
@Test
void should_pass_if_actual_does_not_contain_given_values_according_to_custom_comparison_strategy() {
arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(), actual, IntArrays.arrayOf(12));
}
@Test
void should_pass_if_actual_does_not_contain_given_values_even_if_duplicated_according_to_custom_comparison_strategy() {
arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(), actual, IntArrays.arrayOf(12, 12, 20));
}
@Test
void should_throw_error_if_array_of_values_to_look_for_is_empty_whatever_custom_comparison_strategy_is() {
assertThatIllegalArgumentException().isThrownBy(() -> arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(),
actual,
IntArrays.emptyArray()))
.withMessage(valuesToLookForIsEmpty());
}
@Test
void should_throw_error_if_array_of_values_to_look_for_is_null_whatever_custom_comparison_strategy_is() {
assertThatNullPointerException().isThrownBy(() -> arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(),
actual,
(int[]) null))
.withMessage(valuesToLookForIsNull());
}
@Test
void should_fail_if_actual_is_null_whatever_custom_comparison_strategy_is() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(),
null,
IntArrays.arrayOf(-8)))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_contains_given_values_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> arraysWithCustomComparisonStrategy.assertDoesNotContain(info, actual,
IntArrays.arrayOf(6, -8, 20)));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldNotContain(actual, arrayOf(6, -8, 20), newLinkedHashSet((byte) 6, (byte) -8),
absValueComparisonStrategy));
}
}
| ByteArrays_assertDoesNotContain_with_Integer_Arguments_Test |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/MovedBlocks.java | {
"start": 1588,
"end": 4122
} | class ____<L> {
private final Block block; // the block
/** The locations of the replicas of the block. */
protected final List<L> locations = new ArrayList<L>(3);
public Locations(Block block) {
this.block = block;
}
/** clean block locations */
public synchronized void clearLocations() {
locations.clear();
}
/** add a location */
public synchronized void addLocation(L loc) {
if (!locations.contains(loc)) {
locations.add(loc);
}
}
/** @return if the block is located on the given location. */
public synchronized boolean isLocatedOn(L loc) {
return locations.contains(loc);
}
/** @return its locations */
public synchronized List<L> getLocations() {
return locations;
}
/* @return the block */
public Block getBlock() {
return block;
}
/* Return the length of the block */
public long getNumBytes() {
return block.getNumBytes();
}
@Override
public String toString() {
return block + " size=" + getNumBytes();
}
}
private static final int CUR_WIN = 0;
private static final int OLD_WIN = 1;
private static final int NUM_WINS = 2;
private final long winTimeInterval;
private long lastCleanupTime = Time.monotonicNow();
private final List<Map<Block, Locations<L>>> movedBlocks
= new ArrayList<Map<Block, Locations<L>>>(NUM_WINS);
/** initialize the moved blocks collection */
public MovedBlocks(long winTimeInterval) {
this.winTimeInterval = winTimeInterval;
movedBlocks.add(newMap());
movedBlocks.add(newMap());
}
private Map<Block, Locations<L>> newMap() {
return new HashMap<Block, Locations<L>>();
}
/** add a block thus marking a block to be moved */
public synchronized void put(Locations<L> block) {
movedBlocks.get(CUR_WIN).put(block.getBlock(), block);
}
/** @return if a block is marked as moved */
public synchronized boolean contains(Block block) {
return movedBlocks.get(CUR_WIN).containsKey(block) ||
movedBlocks.get(OLD_WIN).containsKey(block);
}
/** remove old blocks */
public synchronized void cleanup() {
long curTime = Time.monotonicNow();
// check if old win is older than winWidth
if (lastCleanupTime + winTimeInterval <= curTime) {
// purge the old window
movedBlocks.set(OLD_WIN, movedBlocks.get(CUR_WIN));
movedBlocks.set(CUR_WIN, newMap());
lastCleanupTime = curTime;
}
}
} | Locations |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/MllpEndpointBuilderFactory.java | {
"start": 1579,
"end": 13308
} | interface ____
extends
EndpointConsumerBuilder {
default AdvancedMllpEndpointConsumerBuilder advanced() {
return (AdvancedMllpEndpointConsumerBuilder) this;
}
/**
* Enable/Disable the automatic generation of a MLLP Acknowledgement
* MLLP Consumers only.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param autoAck the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder autoAck(boolean autoAck) {
doSetProperty("autoAck", autoAck);
return this;
}
/**
* Enable/Disable the automatic generation of a MLLP Acknowledgement
* MLLP Consumers only.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param autoAck the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder autoAck(String autoAck) {
doSetProperty("autoAck", autoAck);
return this;
}
/**
* Sets the default charset to use.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param charsetName the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder charsetName(String charsetName) {
doSetProperty("charsetName", charsetName);
return this;
}
/**
* Enable/Disable the automatic generation of message headers from the
* HL7 Message MLLP Consumers only.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param hl7Headers the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder hl7Headers(boolean hl7Headers) {
doSetProperty("hl7Headers", hl7Headers);
return this;
}
/**
* Enable/Disable the automatic generation of message headers from the
* HL7 Message MLLP Consumers only.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param hl7Headers the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder hl7Headers(String hl7Headers) {
doSetProperty("hl7Headers", hl7Headers);
return this;
}
/**
* Enable/Disable strict compliance to the MLLP standard. The MLLP
* standard specifies START_OF_BLOCKhl7 payloadEND_OF_BLOCKEND_OF_DATA,
* however, some systems do not send the final END_OF_DATA byte. This
* setting controls whether or not the final END_OF_DATA byte is
* required or optional.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param requireEndOfData the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder requireEndOfData(boolean requireEndOfData) {
doSetProperty("requireEndOfData", requireEndOfData);
return this;
}
/**
* Enable/Disable strict compliance to the MLLP standard. The MLLP
* standard specifies START_OF_BLOCKhl7 payloadEND_OF_BLOCKEND_OF_DATA,
* however, some systems do not send the final END_OF_DATA byte. This
* setting controls whether or not the final END_OF_DATA byte is
* required or optional.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param requireEndOfData the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder requireEndOfData(String requireEndOfData) {
doSetProperty("requireEndOfData", requireEndOfData);
return this;
}
/**
* Enable/Disable converting the payload to a String. If enabled, HL7
* Payloads received from external systems will be validated converted
* to a String. If the charsetName property is set, that character set
* will be used for the conversion. If the charsetName property is not
* set, the value of MSH-18 will be used to determine th appropriate
* character set. If MSH-18 is not set, then the default ISO-8859-1
* character set will be use.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param stringPayload the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder stringPayload(boolean stringPayload) {
doSetProperty("stringPayload", stringPayload);
return this;
}
/**
* Enable/Disable converting the payload to a String. If enabled, HL7
* Payloads received from external systems will be validated converted
* to a String. If the charsetName property is set, that character set
* will be used for the conversion. If the charsetName property is not
* set, the value of MSH-18 will be used to determine th appropriate
* character set. If MSH-18 is not set, then the default ISO-8859-1
* character set will be use.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param stringPayload the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder stringPayload(String stringPayload) {
doSetProperty("stringPayload", stringPayload);
return this;
}
/**
* Enable/Disable the validation of HL7 Payloads If enabled, HL7
* Payloads received from external systems will be validated (see
* Hl7Util.generateInvalidPayloadExceptionMessage for details on the
* validation). If and invalid payload is detected, a
* MllpInvalidMessageException (for consumers) or a
* MllpInvalidAcknowledgementException will be thrown.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param validatePayload the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder validatePayload(boolean validatePayload) {
doSetProperty("validatePayload", validatePayload);
return this;
}
/**
* Enable/Disable the validation of HL7 Payloads If enabled, HL7
* Payloads received from external systems will be validated (see
* Hl7Util.generateInvalidPayloadExceptionMessage for details on the
* validation). If and invalid payload is detected, a
* MllpInvalidMessageException (for consumers) or a
* MllpInvalidAcknowledgementException will be thrown.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param validatePayload the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder validatePayload(String validatePayload) {
doSetProperty("validatePayload", validatePayload);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions occurred while the consumer is trying to
* receive incoming messages, or the likes, will now be processed as a
* message and handled by the routing Error Handler. If disabled, the
* consumer will use the org.apache.camel.spi.ExceptionHandler to deal
* with exceptions by logging them at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions occurred while the consumer is trying to
* receive incoming messages, or the likes, will now be processed as a
* message and handled by the routing Error Handler. If disabled, the
* consumer will use the org.apache.camel.spi.ExceptionHandler to deal
* with exceptions by logging them at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Sets the SSLContextParameters for securing TCP connections. If set,
* the MLLP component will use SSL/TLS for securing both producer and
* consumer TCP connections. This allows the configuration of trust
* stores, key stores, protocols, and other SSL/TLS settings. If not
* set, the MLLP component will use plain TCP communication.
*
* The option is a:
* <code>org.apache.camel.support.jsse.SSLContextParameters</code> type.
*
* Group: security
*
* @param sslContextParameters the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder sslContextParameters(org.apache.camel.support.jsse.SSLContextParameters sslContextParameters) {
doSetProperty("sslContextParameters", sslContextParameters);
return this;
}
/**
* Sets the SSLContextParameters for securing TCP connections. If set,
* the MLLP component will use SSL/TLS for securing both producer and
* consumer TCP connections. This allows the configuration of trust
* stores, key stores, protocols, and other SSL/TLS settings. If not
* set, the MLLP component will use plain TCP communication.
*
* The option will be converted to a
* <code>org.apache.camel.support.jsse.SSLContextParameters</code> type.
*
* Group: security
*
* @param sslContextParameters the value to set
* @return the dsl builder
*/
default MllpEndpointConsumerBuilder sslContextParameters(String sslContextParameters) {
doSetProperty("sslContextParameters", sslContextParameters);
return this;
}
}
/**
* Advanced builder for endpoint consumers for the MLLP component.
*/
public | MllpEndpointConsumerBuilder |
java | apache__camel | core/camel-core-model/src/generated/java/org/apache/camel/model/cloud/ServiceCallServiceChooserConfigurationConfigurer.java | {
"start": 730,
"end": 2967
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("Id", java.lang.String.class);
map.put("Properties", java.util.List.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.model.cloud.ServiceCallServiceChooserConfiguration target = (org.apache.camel.model.cloud.ServiceCallServiceChooserConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "id": target.setId(property(camelContext, java.lang.String.class, value)); return true;
case "properties": target.setProperties(property(camelContext, java.util.List.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "id": return java.lang.String.class;
case "properties": return java.util.List.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.model.cloud.ServiceCallServiceChooserConfiguration target = (org.apache.camel.model.cloud.ServiceCallServiceChooserConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "id": return target.getId();
case "properties": return target.getProperties();
default: return null;
}
}
@Override
public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "properties": return org.apache.camel.model.PropertyDefinition.class;
default: return null;
}
}
}
| ServiceCallServiceChooserConfigurationConfigurer |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker08.java | {
"start": 1609,
"end": 16472
} | class ____ extends WebSocketClientHandshaker {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(WebSocketClientHandshaker08.class);
public static final String MAGIC_GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
private String expectedChallengeResponseString;
private final boolean allowExtensions;
private final boolean performMasking;
private final boolean allowMaskMismatch;
/**
* Creates a new instance.
*
* @param webSocketURL
* URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be
* sent to this URL.
* @param version
* Version of web socket specification to use to connect to the server
* @param subprotocol
* Sub protocol request sent to the server.
* @param allowExtensions
* Allow extensions to be used in the reserved bits of the web socket frame
* @param customHeaders
* Map of custom headers to add to the client request
* @param maxFramePayloadLength
* Maximum length of a frame's payload
*/
public WebSocketClientHandshaker08(URI webSocketURL, WebSocketVersion version, String subprotocol,
boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength) {
this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, true,
false, DEFAULT_FORCE_CLOSE_TIMEOUT_MILLIS);
}
/**
* Creates a new instance.
*
* @param webSocketURL
* URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be
* sent to this URL.
* @param version
* Version of web socket specification to use to connect to the server
* @param subprotocol
* Sub protocol request sent to the server.
* @param allowExtensions
* Allow extensions to be used in the reserved bits of the web socket frame
* @param customHeaders
* Map of custom headers to add to the client request
* @param maxFramePayloadLength
* Maximum length of a frame's payload
* @param performMasking
* Whether to mask all written websocket frames. This must be set to true in order to be fully compatible
* with the websocket specifications. Client applications that communicate with a non-standard server
* which doesn't require masking might set this to false to achieve a higher performance.
* @param allowMaskMismatch
* When set to true, frames which are not masked properly according to the standard will still be
* accepted
*/
public WebSocketClientHandshaker08(URI webSocketURL, WebSocketVersion version, String subprotocol,
boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength,
boolean performMasking, boolean allowMaskMismatch) {
this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, performMasking,
allowMaskMismatch, DEFAULT_FORCE_CLOSE_TIMEOUT_MILLIS);
}
/**
* Creates a new instance.
*
* @param webSocketURL
* URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be
* sent to this URL.
* @param version
* Version of web socket specification to use to connect to the server
* @param subprotocol
* Sub protocol request sent to the server.
* @param allowExtensions
* Allow extensions to be used in the reserved bits of the web socket frame
* @param customHeaders
* Map of custom headers to add to the client request
* @param maxFramePayloadLength
* Maximum length of a frame's payload
* @param performMasking
* Whether to mask all written websocket frames. This must be set to true in order to be fully compatible
* with the websocket specifications. Client applications that communicate with a non-standard server
* which doesn't require masking might set this to false to achieve a higher performance.
* @param allowMaskMismatch
* When set to true, frames which are not masked properly according to the standard will still be
* accepted
* @param forceCloseTimeoutMillis
* Close the connection if it was not closed by the server after timeout specified.
*/
public WebSocketClientHandshaker08(URI webSocketURL, WebSocketVersion version, String subprotocol,
boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength,
boolean performMasking, boolean allowMaskMismatch, long forceCloseTimeoutMillis) {
this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, performMasking,
allowMaskMismatch, forceCloseTimeoutMillis, false, true);
}
/**
* Creates a new instance.
*
* @param webSocketURL
* URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be
* sent to this URL.
* @param version
* Version of web socket specification to use to connect to the server
* @param subprotocol
* Sub protocol request sent to the server.
* @param allowExtensions
* Allow extensions to be used in the reserved bits of the web socket frame
* @param customHeaders
* Map of custom headers to add to the client request
* @param maxFramePayloadLength
* Maximum length of a frame's payload
* @param performMasking
* Whether to mask all written websocket frames. This must be set to true in order to be fully compatible
* with the websocket specifications. Client applications that communicate with a non-standard server
* which doesn't require masking might set this to false to achieve a higher performance.
* @param allowMaskMismatch
* When set to true, frames which are not masked properly according to the standard will still be
* accepted
* @param forceCloseTimeoutMillis
* Close the connection if it was not closed by the server after timeout specified.
* @param absoluteUpgradeUrl
* Use an absolute url for the Upgrade request, typically when connecting through an HTTP proxy over
* clear HTTP
*/
WebSocketClientHandshaker08(URI webSocketURL, WebSocketVersion version, String subprotocol,
boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength,
boolean performMasking, boolean allowMaskMismatch, long forceCloseTimeoutMillis,
boolean absoluteUpgradeUrl) {
this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, performMasking,
allowMaskMismatch, forceCloseTimeoutMillis, absoluteUpgradeUrl, true);
}
/**
* Creates a new instance.
*
* @param webSocketURL
* URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be
* sent to this URL.
* @param version
* Version of web socket specification to use to connect to the server
* @param subprotocol
* Sub protocol request sent to the server.
* @param allowExtensions
* Allow extensions to be used in the reserved bits of the web socket frame
* @param customHeaders
* Map of custom headers to add to the client request
* @param maxFramePayloadLength
* Maximum length of a frame's payload
* @param performMasking
* Whether to mask all written websocket frames. This must be set to true in order to be fully compatible
* with the websocket specifications. Client applications that communicate with a non-standard server
* which doesn't require masking might set this to false to achieve a higher performance.
* @param allowMaskMismatch
* When set to true, frames which are not masked properly according to the standard will still be
* accepted
* @param forceCloseTimeoutMillis
* Close the connection if it was not closed by the server after timeout specified.
* @param absoluteUpgradeUrl
* Use an absolute url for the Upgrade request, typically when connecting through an HTTP proxy over
* clear HTTP
* @param generateOriginHeader
* Allows to generate a `Sec-WebSocket-Origin` header value for handshake request
* according to the given webSocketURL
*/
WebSocketClientHandshaker08(URI webSocketURL, WebSocketVersion version, String subprotocol,
boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength,
boolean performMasking, boolean allowMaskMismatch, long forceCloseTimeoutMillis,
boolean absoluteUpgradeUrl, boolean generateOriginHeader) {
super(webSocketURL, version, subprotocol, customHeaders, maxFramePayloadLength, forceCloseTimeoutMillis,
absoluteUpgradeUrl, generateOriginHeader);
this.allowExtensions = allowExtensions;
this.performMasking = performMasking;
this.allowMaskMismatch = allowMaskMismatch;
}
/**
* /**
* <p>
* Sends the opening request to the server:
* </p>
*
* <pre>
* GET /chat HTTP/1.1
* Host: server.example.com
* Upgrade: websocket
* Connection: Upgrade
* Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
* Sec-WebSocket-Origin: http://example.com
* Sec-WebSocket-Protocol: chat, superchat
* Sec-WebSocket-Version: 8
* </pre>
*
*/
@Override
protected FullHttpRequest newHandshakeRequest() {
URI wsURL = uri();
// Get 16 bit nonce and base 64 encode it
byte[] nonce = WebSocketUtil.randomBytes(16);
String key = WebSocketUtil.base64(nonce);
String acceptSeed = key + MAGIC_GUID;
byte[] sha1 = WebSocketUtil.sha1(acceptSeed.getBytes(CharsetUtil.US_ASCII));
expectedChallengeResponseString = WebSocketUtil.base64(sha1);
if (logger.isDebugEnabled()) {
logger.debug(
"WebSocket version 08 client handshake key: {}, expected response: {}",
key, expectedChallengeResponseString);
}
// Format request
FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, upgradeUrl(wsURL),
Unpooled.EMPTY_BUFFER);
HttpHeaders headers = request.headers();
if (customHeaders != null) {
headers.add(customHeaders);
if (!headers.contains(HttpHeaderNames.HOST)) {
// Only add HOST header if customHeaders did not contain it.
//
// See https://github.com/netty/netty/issues/10101
headers.set(HttpHeaderNames.HOST, websocketHostValue(wsURL));
}
} else {
headers.set(HttpHeaderNames.HOST, websocketHostValue(wsURL));
}
headers.set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET)
.set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE)
.set(HttpHeaderNames.SEC_WEBSOCKET_KEY, key);
if (generateOriginHeader && !headers.contains(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN)) {
headers.set(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN, websocketOriginValue(wsURL));
}
String expectedSubprotocol = expectedSubprotocol();
if (expectedSubprotocol != null && !expectedSubprotocol.isEmpty()) {
headers.set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, expectedSubprotocol);
}
headers.set(HttpHeaderNames.SEC_WEBSOCKET_VERSION, version().toAsciiString());
return request;
}
/**
* <p>
* Process server response:
* </p>
*
* <pre>
* HTTP/1.1 101 Switching Protocols
* Upgrade: websocket
* Connection: Upgrade
* Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
* Sec-WebSocket-Protocol: chat
* </pre>
*
* @param response
* HTTP response returned from the server for the request sent by beginOpeningHandshake00().
* @throws WebSocketHandshakeException
*/
@Override
protected void verify(FullHttpResponse response) {
HttpResponseStatus status = response.status();
if (!HttpResponseStatus.SWITCHING_PROTOCOLS.equals(status)) {
throw new WebSocketClientHandshakeException("Invalid handshake response getStatus: " + status, response);
}
HttpHeaders headers = response.headers();
CharSequence upgrade = headers.get(HttpHeaderNames.UPGRADE);
if (!HttpHeaderValues.WEBSOCKET.contentEqualsIgnoreCase(upgrade)) {
throw new WebSocketClientHandshakeException("Invalid handshake response upgrade: " + upgrade, response);
}
if (!headers.containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE, true)) {
throw new WebSocketClientHandshakeException("Invalid handshake response connection: "
+ headers.get(HttpHeaderNames.CONNECTION), response);
}
CharSequence accept = headers.get(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT);
if (accept == null || !accept.equals(expectedChallengeResponseString)) {
throw new WebSocketClientHandshakeException(String.format(
"Invalid challenge. Actual: %s. Expected: %s", accept, expectedChallengeResponseString), response);
}
}
@Override
protected WebSocketFrameDecoder newWebsocketDecoder() {
return new WebSocket08FrameDecoder(false, allowExtensions, maxFramePayloadLength(), allowMaskMismatch);
}
@Override
protected WebSocketFrameEncoder newWebSocketEncoder() {
return new WebSocket08FrameEncoder(performMasking);
}
@Override
public WebSocketClientHandshaker08 setForceCloseTimeoutMillis(long forceCloseTimeoutMillis) {
super.setForceCloseTimeoutMillis(forceCloseTimeoutMillis);
return this;
}
}
| WebSocketClientHandshaker08 |
java | netty__netty | common/src/main/java/io/netty/util/internal/MathUtil.java | {
"start": 710,
"end": 3906
} | class ____ {
private MathUtil() {
}
/**
* Fast method of finding the next power of 2 greater than or equal to the supplied value.
*
* <p>If the value is {@code <= 0} then 1 will be returned.
* This method is not suitable for {@link Integer#MIN_VALUE} or numbers greater than 2^30.
*
* @param value from which to search for next power of 2
* @return The next power of 2 or the value itself if it is a power of 2
*/
public static int findNextPositivePowerOfTwo(final int value) {
assert value > Integer.MIN_VALUE && value < 0x40000000;
return 1 << (32 - Integer.numberOfLeadingZeros(value - 1));
}
/**
* Fast method of finding the next power of 2 greater than or equal to the supplied value.
* <p>This method will do runtime bounds checking and call {@link #findNextPositivePowerOfTwo(int)} if within a
* valid range.
* @param value from which to search for next power of 2
* @return The next power of 2 or the value itself if it is a power of 2.
* <p>Special cases for return values are as follows:
* <ul>
* <li>{@code <= 0} -> 1</li>
* <li>{@code >= 2^30} -> 2^30</li>
* </ul>
*/
public static int safeFindNextPositivePowerOfTwo(final int value) {
return value <= 0 ? 1 : value >= 0x40000000 ? 0x40000000 : findNextPositivePowerOfTwo(value);
}
/**
* Determine if the requested {@code index} and {@code length} will fit within {@code capacity}.
* @param index The starting index.
* @param length The length which will be utilized (starting from {@code index}).
* @param capacity The capacity that {@code index + length} is allowed to be within.
* @return {@code false} if the requested {@code index} and {@code length} will fit within {@code capacity}.
* {@code true} if this would result in an index out of bounds exception.
*/
public static boolean isOutOfBounds(int index, int length, int capacity) {
return (index | length | capacity | index + length) < 0 || index + length > capacity;
}
/**
* @deprecated not used anymore. User Integer.compare() instead. For removal.
* Compares two {@code int} values.
*
* @param x the first {@code int} to compare
* @param y the second {@code int} to compare
* @return the value {@code 0} if {@code x == y};
* {@code -1} if {@code x < y}; and
* {@code 1} if {@code x > y}
*/
@Deprecated
public static int compare(final int x, final int y) {
// do not subtract for comparison, it could overflow
return Integer.compare(x, y);
}
/**
* @deprecated not used anymore. User Long.compare() instead. For removal.
* Compare two {@code long} values.
* @param x the first {@code long} to compare.
* @param y the second {@code long} to compare.
* @return
* <ul>
* <li>0 if {@code x == y}</li>
* <li>{@code > 0} if {@code x > y}</li>
* <li>{@code < 0} if {@code x < y}</li>
* </ul>
*/
@Deprecated
public static int compare(long x, long y) {
return Long.compare(x, y);
}
}
| MathUtil |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/jdk8/ObservableCollectWithCollector.java | {
"start": 1348,
"end": 2304
} | class ____<T, A, R> extends Observable<R> {
final Observable<T> source;
final Collector<? super T, A, R> collector;
public ObservableCollectWithCollector(Observable<T> source, Collector<? super T, A, R> collector) {
this.source = source;
this.collector = collector;
}
@Override
protected void subscribeActual(@NonNull Observer<? super R> observer) {
A container;
BiConsumer<A, ? super T> accumulator;
Function<A, R> finisher;
try {
container = collector.supplier().get();
accumulator = collector.accumulator();
finisher = collector.finisher();
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
EmptyDisposable.error(ex, observer);
return;
}
source.subscribe(new CollectorObserver<>(observer, container, accumulator, finisher));
}
static final | ObservableCollectWithCollector |
java | spring-projects__spring-framework | spring-jms/src/test/java/org/springframework/jms/annotation/AbstractJmsAnnotationDrivenTests.java | {
"start": 10331,
"end": 10576
} | class ____ {
@JmsListener(id = "first", destination = "myQueue")
@JmsListener(id = "second", destination = "anotherQueue", concurrency = "2-10")
public void repeatableHandle(String msg) {
}
}
@Component
static | JmsListenerRepeatableBean |
java | apache__camel | core/camel-util/src/test/java/org/apache/camel/util/UnitUtilsTest.java | {
"start": 1115,
"end": 2771
} | class ____ {
@Test
public void testPrintUnitFromBytes() {
// needed for the locales that have a decimal separator other than comma
char decimalSeparator = DecimalFormatSymbols.getInstance().getDecimalSeparator();
assertEquals("999 B", printUnitFromBytes(999));
assertEquals("1" + decimalSeparator + "0 kB", printUnitFromBytes(1000));
assertEquals("1" + decimalSeparator + "0 kB", printUnitFromBytes(1001));
assertEquals("1" + decimalSeparator + "2 kB", printUnitFromBytes(1201));
assertEquals("1000" + decimalSeparator + "0 kB", printUnitFromBytes(999999));
assertEquals("1" + decimalSeparator + "0 MB", printUnitFromBytes(1000000));
assertEquals("1" + decimalSeparator + "0 MB", printUnitFromBytes(1000001));
assertEquals("1" + decimalSeparator + "5 MB", printUnitFromBytes(1500001));
}
@Test
public void testPrintUnitFromBytesDot() {
char decimalSeparator = '.';
assertEquals("999 B", printUnitFromBytes(999));
assertEquals("1" + decimalSeparator + "0 kB", printUnitFromBytesDot(1000));
assertEquals("1" + decimalSeparator + "0 kB", printUnitFromBytesDot(1001));
assertEquals("1" + decimalSeparator + "2 kB", printUnitFromBytesDot(1201));
assertEquals("1000" + decimalSeparator + "0 kB", printUnitFromBytesDot(999999));
assertEquals("1" + decimalSeparator + "0 MB", printUnitFromBytesDot(1000000));
assertEquals("1" + decimalSeparator + "0 MB", printUnitFromBytesDot(1000001));
assertEquals("1" + decimalSeparator + "5 MB", printUnitFromBytesDot(1500001));
}
}
| UnitUtilsTest |
java | apache__flink | flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/avro/AvroParquetRecordFormat.java | {
"start": 5650,
"end": 7032
} | class ____<E> implements StreamFormat.Reader<E> {
private final ParquetReader<E> parquetReader;
private long skipCount;
private final boolean checkpointed;
private AvroParquetRecordReader(ParquetReader<E> parquetReader) {
this(parquetReader, 0, false);
}
private AvroParquetRecordReader(
ParquetReader<E> parquetReader, long skipCount, boolean checkpointed) {
this.parquetReader = parquetReader;
this.skipCount = skipCount;
this.checkpointed = checkpointed;
}
@Nullable
@Override
public E read() throws IOException {
E record = parquetReader.read();
incrementPosition();
return record;
}
@Override
public void close() throws IOException {
parquetReader.close();
}
@Nullable
@Override
public CheckpointedPosition getCheckpointedPosition() {
// Since ParquetReader does not expose the offset, always use
// CheckpointedPosition.NO_OFFSET.
return checkpointed
? new CheckpointedPosition(CheckpointedPosition.NO_OFFSET, skipCount)
: null;
}
private void incrementPosition() {
skipCount++;
}
}
}
| AvroParquetRecordReader |
java | apache__flink | flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/SerializedCheckpointData.java | {
"start": 1302,
"end": 6758
} | class ____ implements java.io.Serializable {
private static final long serialVersionUID = -8783744683896503488L;
/** ID of the checkpoint for which the IDs are stored. */
private final long checkpointId;
/** The serialized elements. */
private final byte[] serializedData;
/** The number of elements in the checkpoint. */
private final int numIds;
/**
* Creates a SerializedCheckpointData object for the given serialized data.
*
* @param checkpointId The checkpointId of the checkpoint.
* @param serializedData The serialized IDs in this checkpoint.
* @param numIds The number of IDs in the checkpoint.
*/
public SerializedCheckpointData(long checkpointId, byte[] serializedData, int numIds) {
this.checkpointId = checkpointId;
this.serializedData = serializedData;
this.numIds = numIds;
}
/**
* Gets the checkpointId of the checkpoint.
*
* @return The checkpointId of the checkpoint.
*/
public long getCheckpointId() {
return checkpointId;
}
/**
* Gets the binary data for the serialized elements.
*
* @return The binary data for the serialized elements.
*/
public byte[] getSerializedData() {
return serializedData;
}
/**
* Gets the number of IDs in the checkpoint.
*
* @return The number of IDs in the checkpoint.
*/
public int getNumIds() {
return numIds;
}
// ------------------------------------------------------------------------
// Serialize to Checkpoint
// ------------------------------------------------------------------------
/**
* Converts a list of checkpoints with elements into an array of SerializedCheckpointData.
*
* @param checkpoints The checkpoints to be converted into IdsCheckpointData.
* @param serializer The serializer to serialize the IDs.
* @param <T> The type of the ID.
* @return An array of serializable SerializedCheckpointData, one per entry in the queue.
* @throws IOException Thrown, if the serialization fails.
*/
public static <T> SerializedCheckpointData[] fromDeque(
ArrayDeque<Tuple2<Long, Set<T>>> checkpoints, TypeSerializer<T> serializer)
throws IOException {
return fromDeque(checkpoints, serializer, new DataOutputSerializer(128));
}
/**
* Converts a list of checkpoints into an array of SerializedCheckpointData.
*
* @param checkpoints The checkpoints to be converted into IdsCheckpointData.
* @param serializer The serializer to serialize the IDs.
* @param outputBuffer The reusable serialization buffer.
* @param <T> The type of the ID.
* @return An array of serializable SerializedCheckpointData, one per entry in the queue.
* @throws IOException Thrown, if the serialization fails.
*/
public static <T> SerializedCheckpointData[] fromDeque(
ArrayDeque<Tuple2<Long, Set<T>>> checkpoints,
TypeSerializer<T> serializer,
DataOutputSerializer outputBuffer)
throws IOException {
SerializedCheckpointData[] serializedCheckpoints =
new SerializedCheckpointData[checkpoints.size()];
int pos = 0;
for (Tuple2<Long, Set<T>> checkpoint : checkpoints) {
outputBuffer.clear();
Set<T> checkpointIds = checkpoint.f1;
for (T id : checkpointIds) {
serializer.serialize(id, outputBuffer);
}
serializedCheckpoints[pos++] =
new SerializedCheckpointData(
checkpoint.f0, outputBuffer.getCopyOfBuffer(), checkpointIds.size());
}
return serializedCheckpoints;
}
// ------------------------------------------------------------------------
// De-Serialize from Checkpoint
// ------------------------------------------------------------------------
/**
* De-serializes an array of SerializedCheckpointData back into an ArrayDeque of element
* checkpoints.
*
* @param data The data to be deserialized.
* @param serializer The serializer used to deserialize the data.
* @param <T> The type of the elements.
* @return An ArrayDeque of element checkpoints.
* @throws IOException Thrown, if the serialization fails.
*/
public static <T> ArrayDeque<Tuple2<Long, Set<T>>> toDeque(
SerializedCheckpointData[] data, TypeSerializer<T> serializer) throws IOException {
ArrayDeque<Tuple2<Long, Set<T>>> deque = new ArrayDeque<>(data.length);
DataInputDeserializer deser = null;
for (SerializedCheckpointData checkpoint : data) {
byte[] serializedData = checkpoint.getSerializedData();
if (deser == null) {
deser = new DataInputDeserializer(serializedData, 0, serializedData.length);
} else {
deser.setBuffer(serializedData);
}
final Set<T> ids = CollectionUtil.newHashSetWithExpectedSize(checkpoint.getNumIds());
final int numIds = checkpoint.getNumIds();
for (int i = 0; i < numIds; i++) {
ids.add(serializer.deserialize(deser));
}
deque.addLast(new Tuple2<Long, Set<T>>(checkpoint.checkpointId, ids));
}
return deque;
}
}
| SerializedCheckpointData |
java | quarkusio__quarkus | independent-projects/tools/registry-client/src/main/java/io/quarkus/registry/client/RegistryClientFactory.java | {
"start": 151,
"end": 286
} | interface ____ {
RegistryClient buildRegistryClient(RegistryConfig config) throws RegistryResolutionException;
}
| RegistryClientFactory |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapUserSearchSessionFactorySettings.java | {
"start": 753,
"end": 3034
} | class ____ {
public static final Setting.AffixSetting<String> SEARCH_ATTRIBUTE = Setting.affixKeySetting(
RealmSettings.realmSettingPrefix(LDAP_TYPE),
"user_search.attribute",
key -> Setting.simpleString(
key,
LdapUserSearchSessionFactorySettings.DEFAULT_USERNAME_ATTRIBUTE,
Setting.Property.NodeScope,
Setting.Property.DeprecatedWarning
)
);
public static final Setting.AffixSetting<String> SEARCH_BASE_DN = RealmSettings.simpleString(
LDAP_TYPE,
"user_search.base_dn",
Setting.Property.NodeScope
);
public static final Setting.AffixSetting<String> SEARCH_FILTER = RealmSettings.simpleString(
LDAP_TYPE,
"user_search.filter",
Setting.Property.NodeScope
);
public static final Setting.AffixSetting<LdapSearchScope> SEARCH_SCOPE = Setting.affixKeySetting(
RealmSettings.realmSettingPrefix(LDAP_TYPE),
"user_search.scope",
key -> new Setting<>(
key,
(String) null,
(String s) -> LdapSearchScope.resolve(s, LdapSearchScope.SUB_TREE),
Setting.Property.NodeScope
)
);
public static final Setting.AffixSetting<Boolean> POOL_ENABLED = Setting.affixKeySetting(
RealmSettings.realmSettingPrefix(LDAP_TYPE),
"user_search.pool.enabled",
key -> Setting.boolSetting(key, true, Setting.Property.NodeScope)
);
private static final String DEFAULT_USERNAME_ATTRIBUTE = "uid";
private LdapUserSearchSessionFactorySettings() {}
public static Set<Setting.AffixSetting<?>> getSettings() {
Set<Setting.AffixSetting<?>> settings = new HashSet<>();
settings.addAll(SessionFactorySettings.getSettings(LDAP_TYPE));
settings.addAll(PoolingSessionFactorySettings.getSettings(LDAP_TYPE));
settings.add(SEARCH_BASE_DN);
settings.add(SEARCH_SCOPE);
settings.add(SEARCH_ATTRIBUTE);
settings.add(POOL_ENABLED);
settings.add(SEARCH_FILTER);
settings.addAll(SearchGroupsResolverSettings.getSettings(LDAP_TYPE));
settings.addAll(UserAttributeGroupsResolverSettings.getSettings());
return settings;
}
}
| LdapUserSearchSessionFactorySettings |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/LocalCachedMapCacheOptions.java | {
"start": 989,
"end": 1233
} | class ____<K, V> extends MapCacheOptions<K, V> {
/**
* Various strategies to avoid stale objects in local cache.
* Handle cases when map instance has been disconnected for a while.
*
*/
public | LocalCachedMapCacheOptions |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java | {
"start": 5380,
"end": 56414
} | class ____ extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> getMockPlugins() {
final Collection<Class<? extends Plugin>> plugins = new ArrayList<>(super.getMockPlugins());
plugins.add(MockTransportService.TestPlugin.class);
return plugins;
}
@Override
@Before
public void setUp() throws Exception {
super.setUp();
XContentBuilder oldIndexMapping = XContentFactory.jsonBuilder()
.startObject()
.startObject("_doc")
.startObject("properties")
.startObject("distance")
.field("type", "double")
.endObject()
.startObject("route_length_miles")
.field("type", "alias")
.field("path", "distance")
.endObject()
.startObject("playlist")
.field("type", "text")
.field("store", true)
.endObject()
.startObject("some_dimension")
.field("type", "keyword")
.field("time_series_dimension", true)
.endObject()
.startObject("some_metric")
.field("type", "long")
.field("time_series_metric", TimeSeriesParams.MetricType.COUNTER)
.endObject()
.startObject("secret_soundtrack")
.field("type", "alias")
.field("path", "playlist")
.endObject()
.startObject("old_field")
.field("type", "long")
.endObject()
.startObject("new_field")
.field("type", "alias")
.field("path", "old_field")
.endObject()
.endObject()
.endObject()
.endObject();
Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES)
.putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of("some_dimension"))
.put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2006-01-08T23:40:53.384Z")
.put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z")
.build();
assertAcked(prepareCreate("old_index").setSettings(settings).setMapping(oldIndexMapping));
XContentBuilder newIndexMapping = XContentFactory.jsonBuilder()
.startObject()
.startObject("_doc")
.startObject("properties")
.startObject("distance")
.field("type", "text")
.endObject()
.startObject("route_length_miles")
.field("type", "double")
.endObject()
.startObject("new_field")
.field("type", "long")
.endObject()
.startObject("some_dimension")
.field("type", "keyword")
.endObject()
.startObject("some_metric")
.field("type", "long")
.field("time_series_metric", TimeSeriesParams.MetricType.GAUGE)
.endObject()
.endObject()
.endObject()
.endObject();
assertAcked(prepareCreate("new_index").setMapping(newIndexMapping));
assertAcked(indicesAdmin().prepareAliases(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).addAlias("new_index", "current"));
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(TestMapperPlugin.class, BlockingOnRewriteQueryPlugin.class);
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
.build();
}
@Override
protected boolean addMockHttpTransport() {
return false; // enable http
}
public void testFieldAlias() {
FieldCapabilitiesResponse response = client().prepareFieldCaps().setFields("distance", "route_length_miles").get();
assertIndices(response, "old_index", "new_index");
// Ensure the response has entries for both requested fields.
assertTrue(response.get().containsKey("distance"));
assertTrue(response.get().containsKey("route_length_miles"));
// Check the capabilities for the 'distance' field.
Map<String, FieldCapabilities> distance = response.getField("distance");
assertEquals(2, distance.size());
assertTrue(distance.containsKey("double"));
assertEquals(new FieldCapabilitiesBuilder("distance", "double").indices("old_index").build(), distance.get("double"));
assertTrue(distance.containsKey("text"));
assertEquals(
new FieldCapabilitiesBuilder("distance", "text").isAggregatable(false).indices("new_index").build(),
distance.get("text")
);
// Check the capabilities for the 'route_length_miles' alias.
Map<String, FieldCapabilities> routeLength = response.getField("route_length_miles");
assertEquals(1, routeLength.size());
assertTrue(routeLength.containsKey("double"));
assertEquals(new FieldCapabilitiesBuilder("route_length_miles", "double").build(), routeLength.get("double"));
}
/** Wildcard field patterns match field aliases as well as concrete fields. */
public void testFieldAliasWithWildcard() {
    final FieldCapabilitiesResponse resp = client().prepareFieldCaps().setFields("route*").get();
    assertIndices(resp, "old_index", "new_index");
    final var fields = resp.get();
    assertEquals(1, fields.size());
    assertTrue(fields.containsKey("route_length_miles"));
}
/** Requesting an unknown field alongside an alias returns only the resolvable field. */
public void testFieldAliasFiltering() {
    final FieldCapabilitiesResponse resp = client().prepareFieldCaps().setFields("secret-soundtrack", "route_length_miles").get();
    assertIndices(resp, "old_index", "new_index");
    final var fields = resp.get();
    assertEquals(1, fields.size());
    assertTrue(fields.containsKey("route_length_miles"));
}
/** A wildcard that matches nothing, combined with a concrete field, leaves only that field. */
public void testFieldAliasFilteringWithWildcard() {
    final FieldCapabilitiesResponse resp = client().prepareFieldCaps().setFields("distance", "secret*").get();
    assertIndices(resp, "old_index", "new_index");
    final var fields = resp.get();
    assertEquals(1, fields.size());
    assertTrue(fields.containsKey("distance"));
}
/** With include_unmapped, a field missing from an index gets an "unmapped" pseudo-type entry. */
public void testWithUnmapped() {
    final FieldCapabilitiesResponse resp = client().prepareFieldCaps()
        .setFields("new_field", "old_field")
        .setIncludeUnmapped(true)
        .get();
    assertIndices(resp, "old_index", "new_index");
    assertEquals(2, resp.get().size());

    assertTrue(resp.get().containsKey("old_field"));
    final Map<String, FieldCapabilities> oldFieldCaps = resp.getField("old_field");
    assertEquals(2, oldFieldCaps.size());
    assertTrue(oldFieldCaps.containsKey("long"));
    assertEquals(new FieldCapabilitiesBuilder("old_field", "long").indices("old_index").build(), oldFieldCaps.get("long"));
    // new_index lacks old_field, so it is reported under the synthetic "unmapped" type.
    assertTrue(oldFieldCaps.containsKey("unmapped"));
    assertEquals(
        new FieldCapabilitiesBuilder("old_field", "unmapped").isSearchable(false).isAggregatable(false).indices("new_index").build(),
        oldFieldCaps.get("unmapped")
    );

    // new_field resolves to a single concrete type with no unmapped entry.
    final Map<String, FieldCapabilities> newFieldCaps = resp.getField("new_field");
    assertEquals(1, newFieldCaps.size());
    assertTrue(newFieldCaps.containsKey("long"));
    assertEquals(new FieldCapabilitiesBuilder("new_field", "long").build(), newFieldCaps.get("long"));
}
/** Index aliases expand to their concrete indices, and duplicates with concrete names collapse. */
public void testWithIndexAlias() {
    final FieldCapabilitiesResponse aliasOnly = client().prepareFieldCaps("current").setFields("*").get();
    assertIndices(aliasOnly, "new_index");

    final FieldCapabilitiesResponse mixed = client().prepareFieldCaps("current", "old_index").setFields("*").get();
    assertIndices(mixed, "old_index", "new_index");

    // Naming "new_index" explicitly next to the alias covering it changes nothing.
    final FieldCapabilitiesResponse explicit = client().prepareFieldCaps("current", "old_index", "new_index").setFields("*").get();
    assertEquals(mixed, explicit);
}
/** With no explicit targets, a closed index is simply left out of the response. */
public void testNoIndices() {
    // ignoreUnavailable=false; expand wildcards but do not forbid closed indices.
    final IndicesOptions options = IndicesOptions.fromOptions(false, true, true, false, true, true, false, false);
    client().admin().indices().close(new CloseIndexRequest("old_index")).actionGet();
    final FieldCapabilitiesResponse resp = client().prepareFieldCaps().setFields("*").setIndicesOptions(options).get();
    assertIndices(resp, "new_index");
}
/** Same as {@link #testNoIndices()} but with ignore_unavailable=true; result is identical. */
public void testNoIndicesIgnoreUnavailable() {
    // ignoreUnavailable=true; remaining options match testNoIndices.
    final IndicesOptions options = IndicesOptions.fromOptions(true, true, true, false, true, true, false, false);
    client().admin().indices().close(new CloseIndexRequest("old_index")).actionGet();
    final FieldCapabilitiesResponse resp = client().prepareFieldCaps().setFields("*").setIndicesOptions(options).get();
    assertIndices(resp, "new_index");
}
/** Explicitly targeting only a closed index fails when unavailable indices are not ignored. */
public void testOneClosedIndex() {
    // ignoreUnavailable=false, so the closed index must produce an error.
    final IndicesOptions options = IndicesOptions.fromOptions(false, true, true, false, true, true, false, false);
    client().admin().indices().close(new CloseIndexRequest("old_index")).actionGet();
    final IndexClosedException ex = expectThrows(
        IndexClosedException.class,
        client().prepareFieldCaps("old_index").setFields("*").setIndicesOptions(options)
    );
    assertEquals("closed", ex.getMessage());
}
/** Targeting only a closed index with ignore_unavailable=true yields an empty response. */
public void testOneClosedIndexIgnoreUnavailable() {
    // ignoreUnavailable=true, so the closed index is skipped instead of failing.
    final IndicesOptions options = IndicesOptions.fromOptions(true, true, true, false, true, true, false, false);
    client().admin().indices().close(new CloseIndexRequest("old_index")).actionGet();
    final FieldCapabilitiesResponse resp = client().prepareFieldCaps("old_index").setFields("*").setIndicesOptions(options).get();
    assertIndices(resp);
}
/** One closed target among two fails the whole request when unavailable indices are not ignored. */
public void testTwoIndicesOneClosed() {
    // ignoreUnavailable=false: the closed "old_index" poisons the request.
    final IndicesOptions options = IndicesOptions.fromOptions(false, true, true, false, true, true, false, false);
    client().admin().indices().close(new CloseIndexRequest("old_index")).actionGet();
    final IndexClosedException ex = expectThrows(
        IndexClosedException.class,
        client().prepareFieldCaps("old_index", "new_index").setFields("*").setIndicesOptions(options)
    );
    assertEquals("closed", ex.getMessage());
}
/** With ignore_unavailable=true, the closed target drops out and the open index still answers. */
public void testTwoIndicesOneClosedIgnoreUnavailable() {
    // ignoreUnavailable=true: only the open "new_index" contributes.
    final IndicesOptions options = IndicesOptions.fromOptions(true, true, true, false, true, true, false, false);
    client().admin().indices().close(new CloseIndexRequest("old_index")).actionGet();
    final FieldCapabilitiesResponse resp = client().prepareFieldCaps("old_index", "new_index")
        .setFields("*")
        .setIndicesOptions(options)
        .get();
    assertIndices(resp, "new_index");
}
/** An index filter restricts which indices contribute, based on a per-index query. */
public void testWithIndexFilter() throws InterruptedException {
    assertAcked(
        prepareCreate("index-1").setMapping("timestamp", "type=date", "field1", "type=keyword"),
        prepareCreate("index-2").setMapping("timestamp", "type=date", "field1", "type=long")
    );

    final List<IndexRequestBuilder> docs = new ArrayList<>();
    docs.add(prepareIndex("index-1").setSource("timestamp", "2015-07-08"));
    docs.add(prepareIndex("index-1").setSource("timestamp", "2018-07-08"));
    docs.add(prepareIndex("index-2").setSource("timestamp", "2019-10-12"));
    docs.add(prepareIndex("index-2").setSource("timestamp", "2020-07-08"));
    indexRandom(true, docs);

    // Without a filter, both indices answer and 'field1' shows both mapped types.
    FieldCapabilitiesResponse resp = client().prepareFieldCaps("index-*").setFields("*").get();
    assertIndices(resp, "index-1", "index-2");
    Map<String, FieldCapabilities> field1 = resp.getField("field1");
    assertEquals(2, field1.size());
    assertTrue(field1.containsKey("long"));
    assertTrue(field1.containsKey("keyword"));

    // A range matching only index-2 documents narrows the response to index-2.
    resp = client().prepareFieldCaps("index-*")
        .setFields("*")
        .setIndexFilter(QueryBuilders.rangeQuery("timestamp").gte("2019-11-01"))
        .get();
    assertIndices(resp, "index-2");
    field1 = resp.getField("field1");
    assertEquals(1, field1.size());
    assertTrue(field1.containsKey("long"));

    // And a range matching only index-1 documents narrows it to index-1.
    resp = client().prepareFieldCaps("index-*")
        .setFields("*")
        .setIndexFilter(QueryBuilders.rangeQuery("timestamp").lte("2017-01-01"))
        .get();
    assertIndices(resp, "index-1");
    field1 = resp.getField("field1");
    assertEquals(1, field1.size());
    assertTrue(field1.containsKey("keyword"));
}
/** Metadata fields (_id and the plugin-provided _test) are reported for wildcard and explicit requests alike. */
public void testMetadataFields() {
    for (String[] fields : List.of(new String[] { "*" }, new String[] { "_id", "_test" })) {
        final FieldCapabilitiesResponse response = client().prepareFieldCaps().setFields(fields).get();

        final Map<String, FieldCapabilities> idField = response.getField("_id");
        assertEquals(1, idField.size());
        assertTrue(idField.containsKey("_id"));
        assertEquals(
            new FieldCapabilitiesBuilder("_id", "_id").isMetadataField(true).isAggregatable(false).build(),
            idField.get("_id")
        );

        final Map<String, FieldCapabilities> testField = response.getField("_test");
        assertEquals(1, testField.size());
        assertTrue(testField.containsKey("keyword"));
        assertEquals(new FieldCapabilitiesBuilder("_test", "keyword").isMetadataField(true).build(), testField.get("keyword"));
    }
}
/**
 * Runtime mappings supplied on the request surface as searchable, aggregatable fields.
 * <p>Note: "Runntime" in the method name is a long-standing typo, kept for test-name stability.
 */
public void testWithRunntimeMappings() {
    // Fixed: the previous signature declared `throws InterruptedException`, but nothing here throws it.
    Map<String, Object> runtimeFields = new HashMap<>();
    runtimeFields.put("day_of_week", Collections.singletonMap("type", "keyword"));
    FieldCapabilitiesResponse response = client().prepareFieldCaps().setFields("*").setRuntimeFields(runtimeFields).get();
    Map<String, FieldCapabilities> runtimeField = response.getField("day_of_week");
    assertNotNull(runtimeField);
    assertEquals("day_of_week", runtimeField.get("keyword").getName());
    assertEquals("keyword", runtimeField.get("keyword").getType());
    assertTrue(runtimeField.get("keyword").isSearchable());
    assertTrue(runtimeField.get("keyword").isAggregatable());
}
/** Dimension/metric attributes merge across indices, with conflicts reported per index. */
public void testFieldMetricsAndDimensions() {
    // old_index alone: dimension and counter metric reported without conflicts.
    FieldCapabilitiesResponse resp = client().prepareFieldCaps("old_index").setFields("some_dimension", "some_metric").get();
    assertIndices(resp, "old_index");
    assertEquals(2, resp.get().size());
    assertTrue(resp.get().containsKey("some_dimension"));
    assertTrue(resp.get().get("some_dimension").get("keyword").isDimension());
    assertNull(resp.get().get("some_dimension").get("keyword").nonDimensionIndices());
    assertTrue(resp.get().containsKey("some_metric"));
    assertEquals(TimeSeriesParams.MetricType.COUNTER, resp.get().get("some_metric").get("long").getMetricType());
    assertNull(resp.get().get("some_metric").get("long").metricConflictsIndices());

    // Adding new_index flips the merged dimension flag and records the offending index.
    resp = client().prepareFieldCaps("old_index", "new_index").setFields("some_dimension", "some_metric").get();
    assertIndices(resp, "old_index", "new_index");
    assertEquals(2, resp.get().size());
    assertTrue(resp.get().containsKey("some_dimension"));
    assertFalse(resp.get().get("some_dimension").get("keyword").isDimension());
    assertThat(resp.get().get("some_dimension").get("keyword").nonDimensionIndices(), array(equalTo("new_index")));
}
/**
 * Partial failures: indices whose shards are closed are grouped under failures while the
 * healthy indices still produce field caps; if every target fails, the request throws.
 */
public void testFailures() throws IOException {
    // in addition to the existing "old_index" and "new_index", create two where the test query throws an error on rewrite
    assertAcked(prepareCreate("index1-error"), prepareCreate("index2-error"));
    ensureGreen("index1-error", "index2-error");
    // Closed shards will result to index error because shards must be in readable state
    closeShards(internalCluster(), "index1-error", "index2-error");
    FieldCapabilitiesResponse response = client().prepareFieldCaps("old_index", "new_index", "index1-error", "index2-error")
        .setFields("*")
        .get();
    // Both failing indices share one exception, so they collapse into a single failure entry.
    assertEquals(1, response.getFailures().size());
    assertEquals(2, response.getFailedIndicesCount());
    assertThat(response.getFailures().get(0).getIndices(), arrayContainingInAnyOrder("index1-error", "index2-error"));
    Exception failure = response.getFailures().get(0).getException();
    assertEquals(IllegalIndexShardStateException.class, failure.getClass());
    assertEquals(
        "CurrentState[CLOSED] operations only allowed when shard state is one of [POST_RECOVERY, STARTED]",
        failure.getMessage()
    );
    // the "indices" section should not include failed ones
    assertThat(Arrays.asList(response.getIndices()), containsInAnyOrder("old_index", "new_index"));
    // if all requested indices failed, we fail the request by throwing the exception
    IllegalIndexShardStateException ex = expectThrows(
        IllegalIndexShardStateException.class,
        client().prepareFieldCaps("index1-error", "index2-error").setFields("*")
    );
    assertEquals("CurrentState[CLOSED] operations only allowed when shard state is one of [POST_RECOVERY, STARTED]", ex.getMessage());
}
/**
 * Creates two multi-shard "log-index-*" indices with a shared 'timestamp' field and a
 * conflicting 'field1' (keyword vs long), then indexes timestamped documents into each.
 */
private void populateTimeRangeIndices() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(2);
    assertAcked(
        prepareCreate("log-index-1").setSettings(indexSettings(between(1, 5), 1))
            .setMapping("timestamp", "type=date", "field1", "type=keyword"),
        prepareCreate("log-index-2").setSettings(indexSettings(between(1, 5), 1))
            .setMapping("timestamp", "type=date", "field1", "type=long")
    );
    final List<IndexRequestBuilder> docs = new ArrayList<>();
    for (String date : List.of("2015-07-08", "2018-07-08", "2020-03-03", "2020-09-09")) {
        docs.add(prepareIndex("log-index-1").setSource("timestamp", date));
    }
    for (String date : List.of("2019-10-12", "2020-02-02", "2020-10-10")) {
        docs.add(prepareIndex("log-index-2").setSource("timestamp", date));
    }
    indexRandom(true, docs);
    ensureGreen("log-index-1", "log-index-2");
    indicesAdmin().prepareRefresh("log-index-1", "log-index-2").get();
}
/**
 * Simulates the first node-level field-caps request failing with a circuit-breaker
 * exception and verifies the action recovers so the merged response still covers both
 * log indices and both 'field1' types.
 */
public void testTargetNodeFails() throws Exception {
    populateTimeRangeIndices();
    try {
        final AtomicBoolean failedRequest = new AtomicBoolean();
        // Fail exactly one node-level request, whichever arrives first across the cluster.
        for (String node : internalCluster().getNodeNames()) {
            MockTransportService.getInstance(node)
                .addRequestHandlingBehavior(TransportFieldCapabilitiesAction.ACTION_NODE_NAME, (handler, request, channel, task) -> {
                    if (failedRequest.compareAndSet(false, true)) {
                        channel.sendResponse(new CircuitBreakingException("Simulated", CircuitBreaker.Durability.TRANSIENT));
                    } else {
                        handler.messageReceived(request, channel, task);
                    }
                });
        }
        FieldCapabilitiesRequest request = new FieldCapabilitiesRequest();
        request.indices("log-index-*");
        request.fields("*");
        if (randomBoolean()) {
            request.indexFilter(QueryBuilders.rangeQuery("timestamp").gte("2020-01-01"));
        }
        final FieldCapabilitiesResponse response = client().execute(TransportFieldCapabilitiesAction.TYPE, request).actionGet();
        // The simulated failure must actually have been triggered.
        assertTrue(failedRequest.get());
        assertThat(response.getIndices(), arrayContainingInAnyOrder("log-index-1", "log-index-2"));
        assertThat(response.getField("field1"), aMapWithSize(2));
        assertThat(response.getField("field1"), hasKey("long"));
        assertThat(response.getField("field1"), hasKey("keyword"));
    } finally {
        // Remove the request-handling behaviors installed above.
        for (String node : internalCluster().getNodeNames()) {
            MockTransportService.getInstance(node).clearAllRules();
        }
    }
}
/**
 * Creates "index-1" and "index-2" with a shared 'timestamp' field, one index-specific
 * field each (field1 keyword / field2 long), and a deliberately conflicting 'field3'
 * (keyword in index-1, long in index-2).
 */
private void populateIndices() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(2);
    assertAcked(
        prepareCreate("index-1").setSettings(indexSettings(between(1, 5), 1))
            .setMapping("timestamp", "type=date", "field1", "type=keyword", "field3", "type=keyword"),
        prepareCreate("index-2").setSettings(indexSettings(between(1, 5), 1))
            .setMapping("timestamp", "type=date", "field2", "type=long", "field3", "type=long")
    );
}
/** With include_indices=true every field capability carries the contributing index names. */
public void testIncludeIndices() throws Exception {
    populateIndices();
    final FieldCapabilitiesRequest request = new FieldCapabilitiesRequest();
    request.indices("index-*");
    request.fields("*");
    request.includeIndices(true);
    final FieldCapabilitiesResponse resp = client().execute(TransportFieldCapabilitiesAction.TYPE, request).actionGet();
    assertThat(resp.getIndices(), arrayContainingInAnyOrder("index-1", "index-2"));

    // timestamp is mapped identically in both indices.
    assertThat(resp.getField("timestamp"), aMapWithSize(1));
    assertThat(resp.getField("timestamp"), hasKey("date"));
    assertThat(resp.getField("timestamp").get("date").indices(), arrayContainingInAnyOrder("index-1", "index-2"));

    // field1 and field2 each exist in exactly one index.
    assertThat(resp.getField("field1"), aMapWithSize(1));
    assertThat(resp.getField("field1"), hasKey("keyword"));
    assertThat(resp.getField("field1").get("keyword").indices(), arrayContaining("index-1"));
    assertThat(resp.getField("field2"), aMapWithSize(1));
    assertThat(resp.getField("field2"), hasKey("long"));
    assertThat(resp.getField("field2").get("long").indices(), arrayContaining("index-2"));

    // field3 conflicts: keyword in index-1, long in index-2.
    assertThat(resp.getField("field3"), aMapWithSize(2));
    assertThat(resp.getField("field3"), hasKey("long"));
    assertThat(resp.getField("field3"), hasKey("keyword"));
    assertThat(resp.getField("field3").get("long").indices(), arrayContaining("index-2"));
    assertThat(resp.getField("field3").get("keyword").indices(), arrayContaining("index-1"));
}
/**
 * Randomizes include_indices and checks that per-field index lists appear only when
 * requested — except for conflicting fields, which always carry their index breakdown.
 */
public void testRandomIncludeIndices() throws Exception {
    populateIndices();
    FieldCapabilitiesRequest request = new FieldCapabilitiesRequest();
    request.indices("index-*");
    request.fields("*");
    boolean shouldAlwaysIncludeIndices = randomBoolean();
    request.includeIndices(shouldAlwaysIncludeIndices);
    final FieldCapabilitiesResponse response = client().execute(TransportFieldCapabilitiesAction.TYPE, request).actionGet();
    assertThat(response.getIndices(), arrayContainingInAnyOrder("index-1", "index-2"));
    // Non-conflicting fields only expose their index list when explicitly requested.
    assertThat(response.getField("timestamp"), aMapWithSize(1));
    assertThat(response.getField("timestamp"), hasKey("date"));
    if (shouldAlwaysIncludeIndices) {
        assertThat(response.getField("timestamp").get("date").indices(), arrayContainingInAnyOrder("index-1", "index-2"));
    } else {
        assertNull(response.getField("timestamp").get("date").indices());
    }
    assertThat(response.getField("field1"), aMapWithSize(1));
    assertThat(response.getField("field1"), hasKey("keyword"));
    if (shouldAlwaysIncludeIndices) {
        assertThat(response.getField("field1").get("keyword").indices(), arrayContaining("index-1"));
    } else {
        assertNull(response.getField("field1").get("keyword").indices());
    }
    assertThat(response.getField("field2"), aMapWithSize(1));
    assertThat(response.getField("field2"), hasKey("long"));
    if (shouldAlwaysIncludeIndices) {
        assertThat(response.getField("field2").get("long").indices(), arrayContaining("index-2"));
    } else {
        assertNull(response.getField("field2").get("long").indices());
    }
    assertThat(response.getField("field3"), aMapWithSize(2));
    assertThat(response.getField("field3"), hasKey("long"));
    assertThat(response.getField("field3"), hasKey("keyword"));
    // mapping conflict, therefore indices is always present for `field3`
    assertThat(response.getField("field3").get("long").indices(), arrayContaining("index-2"));
    assertThat(response.getField("field3").get("keyword").indices(), arrayContaining("index-1"));
}
/**
 * An index whose shards can never be allocated fails the request when it is the only
 * target, and is reported under failures (without failing the request) once other
 * indices succeed.
 */
public void testNoActiveCopy() throws Exception {
    // Allocation filter requires an unknown node id, so the shards stay unassigned.
    assertAcked(
        prepareCreate("log-index-inactive").setSettings(
            indexSettings(between(1, 5), 1).put("index.routing.allocation.require._id", "unknown")
        ).setWaitForActiveShards(ActiveShardCount.NONE).setMapping("timestamp", "type=date", "field1", "type=keyword")
    );
    {
        // Only the unassigned index matches: the whole request fails.
        final ElasticsearchException ex = expectThrows(
            ElasticsearchException.class,
            client().prepareFieldCaps("log-index-*").setFields("*")
        );
        assertThat(ex.getMessage(), equalTo("index [log-index-inactive] has no active shard copy"));
    }
    {
        populateTimeRangeIndices();
        FieldCapabilitiesRequest request = new FieldCapabilitiesRequest();
        request.indices("log-index-*");
        request.fields("*");
        if (randomBoolean()) {
            request.indexFilter(QueryBuilders.rangeQuery("timestamp").gte("2020-01-01"));
        }
        final FieldCapabilitiesResponse response = client().execute(TransportFieldCapabilitiesAction.TYPE, request).actionGet();
        assertThat(response.getIndices(), arrayContainingInAnyOrder("log-index-1", "log-index-2"));
        assertThat(response.getField("field1"), aMapWithSize(2));
        assertThat(response.getField("field1"), hasKey("long"));
        // Fixed: previously this line re-checked "long"; field1 is keyword in log-index-1,
        // and the map was already asserted to hold exactly two type entries.
        assertThat(response.getField("field1"), hasKey("keyword"));
        assertThat(response.getFailures(), hasSize(1));
        final FieldCapabilitiesFailure failure = response.getFailures().get(0);
        assertThat(failure.getIndices(), arrayContainingInAnyOrder("log-index-inactive"));
        assertThat(failure.getException().getMessage(), equalTo("index [log-index-inactive] has no active shard copy"));
    }
}
/**
 * For every shard hosted on {@code nodeName} whose index name matches {@code indexName},
 * randomly either closes the shard in place or relocates it to a data node that holds no
 * copy of it. Used to disturb shard placement while a field-caps request is in flight.
 */
private void moveOrCloseShardsOnNodes(String nodeName, Predicate<String> indexName) throws Exception {
    final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName);
    final ClusterState clusterState = clusterService().state();
    for (IndexService indexService : indicesService) {
        for (IndexShard indexShard : indexService) {
            if (indexName.test(indexShard.shardId().getIndexName()) == false) {
                continue;
            }
            if (randomBoolean()) {
                // Option 1: close the shard locally without any cluster-state change.
                closeShardNoCheck(indexShard, randomBoolean());
            } else if (randomBoolean()) {
                // Option 2 (taken half the remaining time): move the shard to another node.
                final ShardId shardId = indexShard.shardId();
                // Nodes that already host (or are receiving) a copy of this shard.
                final var assignedNodes = new HashSet<>();
                clusterState.routingTable().shardRoutingTable(shardId).allShards().forEach(shr -> {
                    if (shr.currentNodeId() != null) {
                        assignedNodes.add(shr.currentNodeId());
                    }
                    if (shr.relocatingNodeId() != null) {
                        assignedNodes.add(shr.relocatingNodeId());
                    }
                });
                // Candidate targets: data nodes with no local copy and no assignment for this shard.
                final var targetNodes = new ArrayList<String>();
                for (final var targetIndicesService : internalCluster().getInstances(IndicesService.class)) {
                    final var targetNode = targetIndicesService.clusterService().localNode();
                    if (targetNode.canContainData()
                        && targetIndicesService.getShardOrNull(shardId) == null
                        && assignedNodes.contains(targetNode.getId()) == false) {
                        targetNodes.add(targetNode.getId());
                    }
                }
                if (targetNodes.isEmpty()) {
                    continue;
                }
                ClusterRerouteUtils.reroute(
                    client(),
                    new MoveAllocationCommand(
                        shardId.getIndexName(),
                        shardId.id(),
                        indicesService.clusterService().localNode().getId(),
                        randomFrom(targetNodes)
                    )
                );
            }
        }
    }
}
/**
 * Moves or closes shards while a field-caps request is in flight and verifies the request
 * still completes with results from both log indices and both 'field1' types.
 */
public void testRelocation() throws Exception {
    populateTimeRangeIndices();
    // Pin shards in place so only our explicit move/close commands disturb them.
    assertAcked(
        client().admin()
            .indices()
            .prepareUpdateSettings("log-index-*")
            .setSettings(Settings.builder().put("index.routing.rebalance.enable", "none").build())
            .get()
    );
    ensureGreen("log-index-*");
    try {
        final AtomicBoolean relocated = new AtomicBoolean();
        for (String node : internalCluster().getNodeNames()) {
            MockTransportService.getInstance(node)
                .addRequestHandlingBehavior(TransportFieldCapabilitiesAction.ACTION_NODE_NAME, (handler, request, channel, task) -> {
                    // Disturb shard placement exactly once, on the first node-level request.
                    if (relocated.compareAndSet(false, true)) {
                        moveOrCloseShardsOnNodes(node, indexName -> indexName.startsWith("log-index-"));
                    }
                    handler.messageReceived(request, channel, task);
                });
        }
        FieldCapabilitiesRequest request = new FieldCapabilitiesRequest();
        request.indices("log-index-*");
        request.fields("*");
        if (randomBoolean()) {
            request.indexFilter(QueryBuilders.rangeQuery("timestamp").gte("2020-01-01"));
        }
        final FieldCapabilitiesResponse response = safeGet(client().execute(TransportFieldCapabilitiesAction.TYPE, request));
        assertThat(response.getIndices(), arrayContainingInAnyOrder("log-index-1", "log-index-2"));
        assertThat(response.getField("field1"), aMapWithSize(2));
        assertThat(response.getField("field1"), hasKey("long"));
        // Fixed: previously asserted hasKey("long") twice; field1 is keyword in log-index-1,
        // and the map was already asserted to hold exactly two type entries.
        assertThat(response.getField("field1"), hasKey("keyword"));
    } finally {
        for (String node : internalCluster().getNodeNames()) {
            MockTransportService.getInstance(node).clearAllRules();
        }
    }
}
/**
 * Creates several indices sharing one mapping and checks that field caps deduplicate
 * cleanly (no per-index lists for identical mappings), that the "-multifield" filter
 * drops multi-fields, and that adding an extra field to a random subset of indices is
 * reflected without disturbing the shared fields.
 */
public void testManyIndicesWithSameMapping() {
    final String mapping = """
        {
            "properties": {
              "message_field": { "type": "text" },
              "value_field": { "type": "long" },
              "multi_field" : { "type" : "ip", "fields" : { "keyword" : { "type" : "keyword" } } },
              "timestamp": {"type": "date"}
            }
        }
        """;
    String[] indices = IntStream.range(0, between(1, 9)).mapToObj(n -> "test_many_index_" + n).toArray(String[]::new);
    assertAcked(
        Arrays.stream(indices)
            .map(index -> indicesAdmin().prepareCreate(index).setMapping(mapping))
            .toArray(CreateIndexRequestBuilder[]::new)
    );
    FieldCapabilitiesRequest request = new FieldCapabilitiesRequest();
    request.indices("test_many_index_*");
    request.fields("*");
    boolean excludeMultiField = randomBoolean();
    if (excludeMultiField) {
        request.filters("-multifield");
    }
    // Shared expectations, reused before and after the mapping update below.
    Consumer<FieldCapabilitiesResponse> verifyResponse = resp -> {
        assertThat(resp.getIndices(), equalTo(indices));
        assertThat(resp.getField("message_field"), hasKey("text"));
        // indices() is null because the field is mapped identically everywhere.
        assertThat(resp.getField("message_field").get("text").indices(), nullValue());
        assertTrue(resp.getField("message_field").get("text").isSearchable());
        assertFalse(resp.getField("message_field").get("text").isAggregatable());

        assertThat(resp.getField("value_field"), hasKey("long"));
        assertThat(resp.getField("value_field").get("long").indices(), nullValue());
        assertTrue(resp.getField("value_field").get("long").isSearchable());
        assertTrue(resp.getField("value_field").get("long").isAggregatable());

        assertThat(resp.getField("timestamp"), hasKey("date"));
        assertThat(resp.getField("multi_field"), hasKey("ip"));
        if (excludeMultiField) {
            assertThat(resp.getField("multi_field.keyword"), not(hasKey("keyword")));
        } else {
            assertThat(resp.getField("multi_field.keyword"), hasKey("keyword"));
        }
    };
    // Single mapping
    verifyResponse.accept(client().execute(TransportFieldCapabilitiesAction.TYPE, request).actionGet());
    // add an extra field for some indices
    String[] indicesWithExtraField = randomSubsetOf(between(1, indices.length), indices).stream().sorted().toArray(String[]::new);
    ensureGreen(indices);
    assertAcked(indicesAdmin().preparePutMapping(indicesWithExtraField).setSource("extra_field", "type=integer").get());
    for (String index : indicesWithExtraField) {
        prepareIndex(index).setSource("extra_field", randomIntBetween(1, 1000)).get();
    }
    FieldCapabilitiesResponse resp = client().execute(TransportFieldCapabilitiesAction.TYPE, request).actionGet();
    verifyResponse.accept(resp);
    assertThat(resp.getField("extra_field"), hasKey("integer"));
    assertThat(resp.getField("extra_field").get("integer").indices(), nullValue());
    assertTrue(resp.getField("extra_field").get("integer").isSearchable());
    assertTrue(resp.getField("extra_field").get("integer").isAggregatable());
}
/**
 * Cancels an in-flight field-caps REST request and verifies that both the coordinating
 * and node-level tasks are cancelled, that the action logs the cleanup of index
 * responses, and that the client future completes with a {@link CancellationException}.
 */
@TestLogging(
    value = "org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction:TRACE",
    reason = "verify the log output on cancelled"
)
public void testCancel() throws Exception {
    try (var mockLog = MockLog.capture(TransportFieldCapabilitiesAction.class)) {
        mockLog.addExpectation(
            new MockLog.SeenEventExpectation(
                "clear resources",
                TransportFieldCapabilitiesAction.class.getCanonicalName(),
                Level.TRACE,
                "clear index responses on cancellation submitted"
            )
        );
        // Make the index filter hang during rewrite so the request stays in flight.
        BlockingOnRewriteQueryBuilder.blockOnRewrite();
        PlainActionFuture<Response> future = new PlainActionFuture<>();
        Request restRequest = new Request("POST", "/_field_caps?fields=*");
        restRequest.setEntity(new StringEntity("""
            {
              "index_filter": {
                "blocking_query": {}
              }
            }
            """, ContentType.APPLICATION_JSON.withCharset(StandardCharsets.UTF_8)));
        Cancellable cancellable = getRestClient().performRequestAsync(restRequest, wrapAsRestResponseListener(future));
        logger.info("--> waiting for field-caps tasks to be started");
        assertBusy(() -> {
            List<TaskInfo> tasks = clusterAdmin().prepareListTasks()
                .setActions("indices:data/read/field_caps", "indices:data/read/field_caps[n]")
                .get()
                .getTasks();
            assertThat(tasks.size(), greaterThanOrEqualTo(2));
            for (TaskInfo task : tasks) {
                assertTrue(task.cancellable());
                assertFalse(task.cancelled());
            }
        }, 30, TimeUnit.SECONDS);
        cancellable.cancel();
        mockLog.awaitAllExpectationsMatched();
        logger.info("--> waiting for field-caps tasks to be cancelled");
        assertBusy(() -> {
            List<TaskInfo> tasks = clusterAdmin().prepareListTasks()
                .setActions("indices:data/read/field_caps", "indices:data/read/field_caps[n]")
                .get()
                .getTasks();
            for (TaskInfo task : tasks) {
                assertTrue(task.cancellable());
                assertTrue(task.cancelled());
            }
        }, 30, TimeUnit.SECONDS);
        // Unblock the rewrite so the server can finish; the client must still see the cancellation.
        BlockingOnRewriteQueryBuilder.unblockOnRewrite();
        expectThrows(CancellationException.class, future::actionGet);
    }
}
/**
 * Verifies that unmerged per-index responses report the index mode (TIME_SERIES, LOGSDB,
 * or STANDARD) for a mix of time-series, logsdb, and standard indices.
 */
public void testIndexMode() throws Exception {
    // Expected mode per index name, built up as the indices are created.
    Map<String, IndexMode> indexModes = new HashMap<>();
    // metrics
    {
        final String metricsMapping = """
            {
                "properties": {
                    "@timestamp": { "type": "date" },
                    "hostname": { "type": "keyword", "time_series_dimension": true },
                    "request_count" : { "type" : "long", "time_series_metric" : "counter" },
                    "cluster": {"type": "keyword"}
                }
            }
            """;
        Settings settings = Settings.builder().put("mode", "time_series").putList("routing_path", List.of("hostname")).build();
        int numIndices = between(1, 5);
        for (int i = 0; i < numIndices; i++) {
            // Same mapping with and without the time_series settings, to pair each TSDB index
            // with a STANDARD counterpart.
            assertAcked(
                indicesAdmin().prepareCreate("test_metrics_" + i).setSettings(settings).setMapping(metricsMapping),
                indicesAdmin().prepareCreate("test_old_metrics_" + i).setMapping(metricsMapping)
            );
            indexModes.put("test_metrics_" + i, IndexMode.TIME_SERIES);
            indexModes.put("test_old_metrics_" + i, IndexMode.STANDARD);
        }
    }
    // logsdb
    {
        final String logsMapping = """
            {
                "properties": {
                    "@timestamp": { "type": "date" },
                    "hostname": { "type": "keyword"},
                    "request_count" : { "type" : "long"},
                    "cluster": {"type": "keyword"}
                }
            }
            """;
        Settings settings = Settings.builder().put("mode", "logsdb").build();
        int numIndices = between(1, 5);
        for (int i = 0; i < numIndices; i++) {
            assertAcked(
                indicesAdmin().prepareCreate("test_logs_" + i).setSettings(settings).setMapping(logsMapping),
                indicesAdmin().prepareCreate("test_old_logs_" + i).setMapping(logsMapping)
            );
            indexModes.put("test_logs_" + i, IndexMode.LOGSDB);
            indexModes.put("test_old_logs_" + i, IndexMode.STANDARD);
        }
    }
    FieldCapabilitiesRequest request = new FieldCapabilitiesRequest();
    // Keep per-index responses so each index's mode is observable.
    request.setMergeResults(false);
    request.indices("test_*");
    request.fields(randomFrom("*", "@timestamp", "host*"));
    var resp = client().fieldCaps(request).get();
    assertThat(resp.getFailures(), empty());
    Map<String, IndexMode> actualIndexModes = new HashMap<>();
    for (var indexResp : resp.getIndexResponses()) {
        actualIndexModes.put(indexResp.getIndexName(), indexResp.getIndexMode());
    }
    assertThat(actualIndexModes, equalTo(indexModes));
}
/** Resolution metadata records the alias expression and the concrete index it expanded to. */
public void testResolvedExpressionWithIndexAlias() {
    final FieldCapabilitiesResponse resp = client().prepareFieldCaps("current").setFields("*").setIncludeResolvedTo(true).get();
    assertIndices(resp, "new_index");
    assertEquals(0, resp.getResolvedRemotely().size());
    final List<ResolvedIndexExpression> expressions = resp.getResolvedLocally().expressions();
    assertEquals(1, expressions.size());
    final ResolvedIndexExpression expression = expressions.get(0);
    assertEquals("current", expression.original());
    assertThat(expression.localExpressions().indices(), containsInAnyOrder("new_index"));
}
/** A wildcard expression resolves to every matching concrete index. */
public void testResolvedExpressionWithWildcard() {
    final FieldCapabilitiesResponse resp = client().prepareFieldCaps("*index").setFields("*").setIncludeResolvedTo(true).get();
    assertIndices(resp, "new_index", "old_index");
    assertEquals(0, resp.getResolvedRemotely().size());
    final List<ResolvedIndexExpression> expressions = resp.getResolvedLocally().expressions();
    assertEquals(1, expressions.size());
    final ResolvedIndexExpression expression = expressions.get(0);
    assertEquals("*index", expression.original());
    assertThat(expression.localExpressions().indices(), containsInAnyOrder("new_index", "old_index"));
}
/**
 * Resolution metadata distinguishes open targets (SUCCESS) from closed ones, which
 * resolve to no concrete indices with CONCRETE_RESOURCE_NOT_VISIBLE.
 */
public void testResolvedExpressionWithClosedIndices() throws IOException {
    // in addition to the existing "old_index" and "new_index", create two where the test query throws an error on rewrite
    assertAcked(prepareCreate("index1-error"), prepareCreate("index2-error"));
    ensureGreen("index1-error", "index2-error");
    // Closed shards will result to index error because shards must be in readable state
    closeShards(internalCluster(), "index1-error", "index2-error");
    FieldCapabilitiesResponse response = client().prepareFieldCaps("old_index", "new_index", "index1-error", "index2-error")
        .setFields("*")
        .setIncludeResolvedTo(true)
        .get();
    Set<String> openIndices = Set.of("old_index", "new_index");
    Set<String> closedIndices = Set.of("index1-error", "index2-error");
    assertEquals(0, response.getResolvedRemotely().size());
    ResolvedIndexExpressions resolvedLocally = response.getResolvedLocally();
    List<ResolvedIndexExpression> expressions = resolvedLocally.expressions();
    // One resolution entry per requested expression.
    assertEquals(4, resolvedLocally.expressions().size());
    for (ResolvedIndexExpression expression : expressions) {
        ResolvedIndexExpression.LocalExpressions localExpressions = expression.localExpressions();
        if (openIndices.contains(expression.original())) {
            assertThat(expression.localExpressions().indices(), containsInAnyOrder(expression.original()));
            assertEquals(ResolvedIndexExpression.LocalIndexResolutionResult.SUCCESS, localExpressions.localIndexResolutionResult());
        } else if (closedIndices.contains(expression.original())) {
            Set<String> concreteIndices = localExpressions.indices();
            assertEquals(0, concreteIndices.size());
            assertEquals(
                ResolvedIndexExpression.LocalIndexResolutionResult.CONCRETE_RESOURCE_NOT_VISIBLE,
                localExpressions.localIndexResolutionResult()
            );
        }
    }
}
/** Omitting target indices resolves as the implicit "_all" expression. */
public void testResolvedExpressionWithAllIndices() {
    final FieldCapabilitiesResponse resp = client().prepareFieldCaps().setFields("*").setIncludeResolvedTo(true).get();
    assertIndices(resp, "new_index", "old_index");
    assertEquals(0, resp.getResolvedRemotely().size());
    final List<ResolvedIndexExpression> expressions = resp.getResolvedLocally().expressions();
    assertEquals(1, expressions.size());
    final ResolvedIndexExpression expression = expressions.get(0);
    assertEquals("_all", expression.original()); // not setting indices means _all
    final ResolvedIndexExpression.LocalExpressions localExpressions = expression.localExpressions();
    assertThat(localExpressions.indices(), containsInAnyOrder("new_index", "old_index"));
    assertEquals(ResolvedIndexExpression.LocalIndexResolutionResult.SUCCESS, localExpressions.localIndexResolutionResult());
}
public void testResolvedExpressionWithOnlyOneClosedIndexAndIgnoreUnavailable() {
boolean ignoreUnavailable = true;
IndicesOptions options = IndicesOptions.fromOptions(ignoreUnavailable, true, true, false, true, true, false, false);
client().admin().indices().close(new CloseIndexRequest("old_index")).actionGet();
FieldCapabilitiesResponse response = client().prepareFieldCaps("old_index")
.setFields("*")
.setIndicesOptions(options)
.setIncludeResolvedTo(true)
.get();
assertIndices(response);
assertEquals(0, response.getResolvedRemotely().size());
ResolvedIndexExpressions resolvedLocally = response.getResolvedLocally();
List<ResolvedIndexExpression> expressions = resolvedLocally.expressions();
assertEquals(1, expressions.size());
ResolvedIndexExpression expression = expressions.get(0);
assertEquals("old_index", expression.original());
assertEquals(1, resolvedLocally.expressions().size());
ResolvedIndexExpression.LocalExpressions localExpressions = expression.localExpressions();
Set<String> concreteIndices = localExpressions.indices();
assertEquals(0, concreteIndices.size());
assertEquals(
ResolvedIndexExpression.LocalIndexResolutionResult.CONCRETE_RESOURCE_NOT_VISIBLE,
localExpressions.localIndexResolutionResult()
);
}
public void testResolvedExpressionWithIndexFilter() throws InterruptedException {
assertAcked(
prepareCreate("index-1").setMapping("timestamp", "type=date", "field1", "type=keyword"),
prepareCreate("index-2").setMapping("timestamp", "type=date", "field1", "type=long")
);
List<IndexRequestBuilder> reqs = new ArrayList<>();
reqs.add(prepareIndex("index-1").setSource("timestamp", "2015-07-08"));
reqs.add(prepareIndex("index-1").setSource("timestamp", "2018-07-08"));
reqs.add(prepareIndex("index-2").setSource("timestamp", "2019-10-12"));
reqs.add(prepareIndex("index-2").setSource("timestamp", "2020-07-08"));
indexRandom(true, reqs);
FieldCapabilitiesResponse response = client().prepareFieldCaps("index-*")
.setFields("*")
.setIndexFilter(QueryBuilders.rangeQuery("timestamp").gte("2019-11-01"))
.setIncludeResolvedTo(true)
.get();
assertIndices(response, "index-2");
assertEquals(0, response.getResolvedRemotely().size());
ResolvedIndexExpressions resolvedLocally = response.getResolvedLocally();
List<ResolvedIndexExpression> expressions = resolvedLocally.expressions();
assertEquals(1, resolvedLocally.expressions().size());
ResolvedIndexExpression expression = expressions.get(0);
assertEquals("index-*", expression.original());
assertThat(expression.localExpressions().indices(), containsInAnyOrder("index-1", "index-2"));
}
public void testNoneExpressionIndices() {
// The auth code injects the pattern ["*", "-*"] which effectively means a request that requests no indices
FieldCapabilitiesResponse response = client().prepareFieldCaps("*", "-*").setFields("*").get();
assertThat(response.getIndices().length, is(0));
}
public void testExclusion() {
assertAcked(prepareCreate("index-2024"), prepareCreate("index-2025"));
prepareIndex("index-2024").setSource("timestamp", "2024", "f1", "1").get();
prepareIndex("index-2025").setSource("timestamp", "2025", "f2", "2").get();
var response = client().prepareFieldCaps("index-*", "-*2025").setFields("*").get();
assertIndices(response, "index-2024");
}
public void testExclusionWithResolvedTo() {
assertAcked(prepareCreate("index-2024"), prepareCreate("index-2025"));
prepareIndex("index-2024").setSource("timestamp", "2024", "f1", "1").get();
prepareIndex("index-2025").setSource("timestamp", "2025", "f2", "2").get();
var response = client().prepareFieldCaps("index-*", "-*2025").setFields("*").setIncludeResolvedTo(true).get();
assertIndices(response, "index-2024");
assertEquals(0, response.getResolvedRemotely().size());
ResolvedIndexExpressions resolvedLocally = response.getResolvedLocally();
List<ResolvedIndexExpression> expressions = resolvedLocally.expressions();
assertEquals(1, resolvedLocally.expressions().size());
ResolvedIndexExpression expression = expressions.get(0);
assertEquals("index-*", expression.original());
assertThat(expression.localExpressions().indices(), containsInAnyOrder("index-2024", "index-2025"));
}
private void assertIndices(FieldCapabilitiesResponse response, String... indices) {
assertNotNull(response.getIndices());
Arrays.sort(indices);
Arrays.sort(response.getIndices());
assertArrayEquals(indices, response.getIndices());
}
static void closeShards(InternalTestCluster cluster, String... indices) throws IOException {
final Set<String> indicesToClose = Set.of(indices);
for (String node : cluster.getNodeNames()) {
final IndicesService indicesService = cluster.getInstance(IndicesService.class, node);
for (IndexService indexService : indicesService) {
if (indicesToClose.contains(indexService.getMetadata().getIndex().getName())) {
for (IndexShard indexShard : indexService) {
closeShardNoCheck(indexShard);
}
}
}
}
}
public static | FieldCapabilitiesIT |
java | elastic__elasticsearch | build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java | {
"start": 829,
"end": 5276
} | class ____ {
protected static Project rootProject;
protected static Project archivesProject;
protected static Project packagesProject;
protected static Project bwcProject;
protected static final Version BWC_MAJOR_VERSION = Version.fromString("2.0.0");
protected static final Version BWC_MINOR_VERSION = Version.fromString("1.1.0");
protected static final Version BWC_STAGED_VERSION = Version.fromString("1.0.0");
protected static final Version BWC_BUGFIX_VERSION = Version.fromString("1.0.1");
protected static final Version BWC_MAINTENANCE_VERSION = Version.fromString("0.90.1");
protected static final List<DevelopmentBranch> DEVELOPMENT_BRANCHES = Arrays.asList(
new DevelopmentBranch("main", Version.fromString("2.0.0")),
new DevelopmentBranch("1.1", Version.fromString("1.1.0")),
new DevelopmentBranch("1.0", Version.fromString("1.0.1")),
new DevelopmentBranch("0.90", Version.fromString("0.90.1"))
);
protected static final BwcVersions BWC_MINOR = new BwcVersions(
BWC_MAJOR_VERSION,
Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION),
DEVELOPMENT_BRANCHES
);
protected static final BwcVersions BWC_STAGED = new BwcVersions(
BWC_MAJOR_VERSION,
Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_STAGED_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION),
DEVELOPMENT_BRANCHES
);
protected static final BwcVersions BWC_BUGFIX = new BwcVersions(
BWC_MAJOR_VERSION,
Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION),
DEVELOPMENT_BRANCHES
);
protected static final BwcVersions BWC_MAINTENANCE = new BwcVersions(
BWC_MINOR_VERSION,
Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_BUGFIX_VERSION, BWC_MINOR_VERSION),
DEVELOPMENT_BRANCHES
);
protected static String projectName(String base, boolean bundledJdk) {
String prefix = bundledJdk == false ? "no-jdk-" : "";
return prefix + base;
}
protected void checkBwc(
String projectName,
String config,
Version version,
ElasticsearchDistributionType type,
ElasticsearchDistribution.Platform platform,
BwcVersions bwcVersions
) {
Project project = createProject(bwcVersions);
Project archiveProject = ProjectBuilder.builder().withParent(bwcProject).withName(projectName).build();
archiveProject.getConfigurations().create(config);
archiveProject.getArtifacts().add(config, new File("doesnotmatter"));
createDistro(project, "distro", version.toString(), type, platform, true);
}
protected ElasticsearchDistribution createDistro(
Project project,
String name,
String version,
ElasticsearchDistributionType type,
ElasticsearchDistribution.Platform platform,
Boolean bundledJdk
) {
NamedDomainObjectContainer<ElasticsearchDistribution> distros = DistributionDownloadPlugin.getContainer(project);
return distros.create(name, distro -> {
if (version != null) {
distro.setVersion(version);
}
if (type != null) {
distro.setType(type);
}
if (platform != null) {
distro.setPlatform(platform);
}
if (bundledJdk != null) {
distro.setBundledJdk(bundledJdk);
}
}).maybeFreeze();
}
protected Project createProject(BwcVersions bwcVersions) {
rootProject = ProjectBuilder.builder().build();
Project distributionProject = ProjectBuilder.builder().withParent(rootProject).withName("distribution").build();
archivesProject = ProjectBuilder.builder().withParent(distributionProject).withName("archives").build();
packagesProject = ProjectBuilder.builder().withParent(distributionProject).withName("packages").build();
bwcProject = ProjectBuilder.builder().withParent(distributionProject).withName("bwc").build();
Project project = ProjectBuilder.builder().withParent(rootProject).build();
if (bwcVersions != null) {
project.getExtensions().getExtraProperties().set("bwcVersions", bwcVersions);
}
project.getPlugins().apply("elasticsearch.distribution-download");
return project;
}
}
| AbstractDistributionDownloadPluginTests |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapSession.java | {
"start": 5094,
"end": 6241
} | interface ____ {
/**
* Asynchronously resolve the group name for the given ldap user
* @param ldapConnection an authenticated {@link LDAPConnection} to be used for LDAP queries
* @param userDn the distinguished name of the ldap user
* @param timeout the timeout for any ldap operation
* @param logger the logger to use if necessary
* @param attributes a collection of attributes that were previously retrieved for the user such as during a user search.
* {@code null} indicates that the attributes have not been attempted to be retrieved
* @param listener the listener to call on a result or on failure
*/
void resolve(
LDAPInterface ldapConnection,
String userDn,
TimeValue timeout,
Logger logger,
Collection<Attribute> attributes,
ActionListener<List<String>> listener
);
/**
* Returns the attributes that this resolvers uses. If no attributes are required, return {@code null}.
*/
String[] attributes();
}
}
| GroupsResolver |
java | junit-team__junit5 | documentation/src/test/java/example/ExternalFieldSourceDemo.java | {
"start": 752,
"end": 856
} | class ____ {
public static final List<String> tropicalFruits = List.of("pineapple", "kiwi");
}
| FruitUtils |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/util/JvmShutdownSafeguard.java | {
"start": 2836,
"end": 4647
} | class ____ implements Runnable {
private final long delayMillis;
private DelayedTerminator(long delayMillis) {
this.delayMillis = delayMillis;
}
@Override
public void run() {
try {
Thread.sleep(delayMillis);
} catch (Throwable t) {
// catch all, including thread death, etc
}
Runtime.getRuntime().halt(EXIT_CODE);
}
}
// ------------------------------------------------------------------------
// Installing as a shutdown hook
// ------------------------------------------------------------------------
/**
* Installs the safeguard shutdown hook. The maximum time that the JVM is allowed to spend on
* shutdown before being killed is five seconds.
*
* @param logger The logger to log errors to.
*/
public static void installAsShutdownHook(Logger logger) {
installAsShutdownHook(logger, DEFAULT_DELAY);
}
/**
* Installs the safeguard shutdown hook. The maximum time that the JVM is allowed to spend on
* shutdown before being killed is the given number of milliseconds.
*
* @param logger The logger to log errors to.
* @param delayMillis The delay (in milliseconds) to wait after clean shutdown was stared,
* before forcibly terminating the JVM.
*/
public static void installAsShutdownHook(Logger logger, long delayMillis) {
checkArgument(delayMillis >= 0, "delay must be >= 0");
// install the blocking shutdown hook
Thread shutdownHook = new JvmShutdownSafeguard(delayMillis);
ShutdownHookUtil.addShutdownHookThread(
shutdownHook, JvmShutdownSafeguard.class.getSimpleName(), logger);
}
}
| DelayedTerminator |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/idclass/IdClassWithEagerManyToOneTest.java | {
"start": 1078,
"end": 5974
} | class ____ {
@BeforeEach
public void setUp(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Subsystem subsystem = new Subsystem( "1", "Linux" );
SystemUser systemUser = new SystemUser( subsystem, "admin", "Andrea" );
session.persist( subsystem );
session.persist( systemUser );
}
);
}
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testGet(SessionFactoryScope scope) {
SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction(
session -> {
PK pk = new PK( new Subsystem( "1", "Linux2" ), "admin" );
SystemUser systemUser = session.get( SystemUser.class, pk );
assertThat( systemUser.getName(), is( "Andrea" ) );
Subsystem subsystem = systemUser.getSubsystem();
statementInspector.assertExecutedCount( 1 );
statementInspector.assertNumberOfOccurrenceInQuery(
0,
"join",
1
);
statementInspector.clear();
assertTrue( Hibernate.isInitialized( subsystem ) );
assertThat( subsystem.getId(), is( "1" ) );
assertThat( subsystem.getDescription(), is( "Linux" ) );
assertThat( systemUser.getUsername(), is( "admin" ) );
statementInspector.assertExecutedCount( 0 );
}
);
}
@Test
public void testHql(SessionFactoryScope scope) {
SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction(
session -> {
PK pk = new PK( new Subsystem( "1", "Linux2" ), "admin" );
SystemUser systemUser = session.createQuery(
"from SystemUser s where s.id = :id",
SystemUser.class
).setParameter( "id", pk ).getSingleResult();
assertThat( systemUser.getName(), is( "Andrea" ) );
Subsystem subsystem = systemUser.getSubsystem();
assertTrue( Hibernate.isInitialized( subsystem ) );
statementInspector.assertExecutedCount( 2 );
statementInspector.assertNumberOfOccurrenceInQuery(
0,
"join",
0
);
statementInspector.assertNumberOfOccurrenceInQuery(
1,
"join",
0
);
statementInspector.clear();
assertThat( subsystem.getId(), is( "1" ) );
assertThat( subsystem.getDescription(), is( "Linux" ) );
assertThat( systemUser.getUsername(), is( "admin" ) );
assertTrue( Hibernate.isInitialized( subsystem ) );
statementInspector.assertExecutedCount( 0 );
}
);
statementInspector.clear();
scope.inTransaction(
session -> {
SystemUser systemUser = session.createQuery(
"from SystemUser s where s.username = :username",
SystemUser.class
).setParameter( "username", "admin" ).getSingleResult();
assertThat( systemUser.getName(), is( "Andrea" ) );
Subsystem subsystem = systemUser.getSubsystem();
assertTrue( Hibernate.isInitialized( subsystem ) );
statementInspector.assertExecutedCount( 2 );
statementInspector.assertNumberOfOccurrenceInQuery(
0,
"join",
0
);
statementInspector.assertNumberOfOccurrenceInQuery(
1,
"join",
0
);
statementInspector.clear();
assertThat( subsystem.getId(), is( "1" ) );
assertThat( subsystem.getDescription(), is( "Linux" ) );
assertThat( systemUser.getUsername(), is( "admin" ) );
statementInspector.assertExecutedCount( 0 );
}
);
}
@Test
public void testHql2(SessionFactoryScope scope) {
SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction(
session -> {
// intentionally set the Subsystem description to "Linux6", only the Subsystem.id value is used for the parameter binding
PK pk = new PK( new Subsystem( "1", "Linux6" ), "admin" );
SystemUser systemUser = session.createQuery(
"from SystemUser s where s.id = :id",
SystemUser.class
).setParameter( "id", pk ).getSingleResult();
assertThat( systemUser.getName(), is( "Andrea" ) );
Subsystem subsystem = systemUser.getSubsystem();
assertTrue( Hibernate.isInitialized( subsystem ) );
statementInspector.assertExecutedCount( 2 );
statementInspector.assertNumberOfOccurrenceInQuery(
0,
"join",
0
);
statementInspector.assertNumberOfOccurrenceInQuery(
1,
"join",
0
);
statementInspector.clear();
assertThat( subsystem.getId(), is( "1" ) );
assertThat( subsystem.getDescription(), is( "Linux" ) );
assertThat( systemUser.getUsername(), is( "admin" ) );
statementInspector.assertExecutedCount( 0 );
}
);
}
@Entity(name = "SystemUser")
@IdClass(PK.class)
public static | IdClassWithEagerManyToOneTest |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelInput.java | {
"start": 907,
"end": 3275
} | class ____ implements ToXContentObject, Writeable {
public static final String NAME = "trained_model_config_input";
public static final ParseField FIELD_NAMES = new ParseField("field_names");
public static final ConstructingObjectParser<TrainedModelInput, Void> LENIENT_PARSER = createParser(true);
public static final ConstructingObjectParser<TrainedModelInput, Void> STRICT_PARSER = createParser(false);
private final List<String> fieldNames;
public TrainedModelInput(List<String> fieldNames) {
this.fieldNames = Collections.unmodifiableList(ExceptionsHelper.requireNonNull(fieldNames, FIELD_NAMES));
}
public TrainedModelInput(StreamInput in) throws IOException {
this.fieldNames = in.readCollectionAsImmutableList(StreamInput::readString);
}
@SuppressWarnings("unchecked")
private static ConstructingObjectParser<TrainedModelInput, Void> createParser(boolean ignoreUnknownFields) {
ConstructingObjectParser<TrainedModelInput, Void> parser = new ConstructingObjectParser<>(
NAME,
ignoreUnknownFields,
a -> new TrainedModelInput((List<String>) a[0])
);
parser.declareStringArray(ConstructingObjectParser.constructorArg(), FIELD_NAMES);
return parser;
}
public static TrainedModelInput fromXContent(XContentParser parser, boolean lenient) throws IOException {
return lenient ? LENIENT_PARSER.parse(parser, null) : STRICT_PARSER.parse(parser, null);
}
public List<String> getFieldNames() {
return fieldNames;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeStringCollection(fieldNames);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(FIELD_NAMES.getPreferredName(), fieldNames);
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TrainedModelInput that = (TrainedModelInput) o;
return Objects.equals(fieldNames, that.fieldNames);
}
@Override
public int hashCode() {
return Objects.hash(fieldNames);
}
}
| TrainedModelInput |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/convert/StringToOptionalConverter.java | {
"start": 1011,
"end": 1266
} | class ____ implements StringConverter<Optional> {
@Override
public Optional convert(String source) {
return ofNullable(source);
}
@Override
public int getPriority() {
return MIN_PRIORITY;
}
}
| StringToOptionalConverter |
java | junit-team__junit5 | junit-platform-launcher/src/main/java/org/junit/platform/launcher/core/LauncherDiscoveryResult.java | {
"start": 1273,
"end": 3715
} | class ____ {
private final Map<TestEngine, EngineResultInfo> testEngineResults;
private final ConfigurationParameters configurationParameters;
private final OutputDirectoryCreator outputDirectoryCreator;
LauncherDiscoveryResult(Map<TestEngine, EngineResultInfo> testEngineResults,
ConfigurationParameters configurationParameters, OutputDirectoryCreator outputDirectoryCreator) {
this.testEngineResults = unmodifiableMap(new LinkedHashMap<>(testEngineResults));
this.configurationParameters = configurationParameters;
this.outputDirectoryCreator = outputDirectoryCreator;
}
public TestDescriptor getEngineTestDescriptor(TestEngine testEngine) {
return getEngineResult(testEngine).getRootDescriptor();
}
@API(status = INTERNAL, since = "1.13")
public List<DiscoveryIssue> getDiscoveryIssues(TestEngine testEngine) {
return getEngineResult(testEngine).getDiscoveryIssueNotifier().getAllIssues();
}
EngineResultInfo getEngineResult(TestEngine testEngine) {
return requireNonNull(this.testEngineResults.get(testEngine));
}
ConfigurationParameters getConfigurationParameters() {
return this.configurationParameters;
}
OutputDirectoryCreator getOutputDirectoryCreator() {
return this.outputDirectoryCreator;
}
public Collection<TestEngine> getTestEngines() {
return this.testEngineResults.keySet();
}
boolean containsCriticalIssuesOrContainsTests() {
return this.testEngineResults.values().stream() //
.anyMatch(EngineResultInfo::containsCriticalIssuesOrContainsTests);
}
Collection<TestDescriptor> getEngineTestDescriptors() {
return this.testEngineResults.values().stream() //
.map(EngineResultInfo::getRootDescriptor) //
.toList();
}
public LauncherDiscoveryResult withRetainedEngines(Predicate<? super TestDescriptor> predicate) {
Map<TestEngine, EngineResultInfo> prunedTestEngineResults = retainEngines(predicate);
if (prunedTestEngineResults.size() < this.testEngineResults.size()) {
return new LauncherDiscoveryResult(prunedTestEngineResults, this.configurationParameters,
this.outputDirectoryCreator);
}
return this;
}
private Map<TestEngine, EngineResultInfo> retainEngines(Predicate<? super TestDescriptor> predicate) {
var retainedEngines = new LinkedHashMap<>(this.testEngineResults);
retainedEngines.entrySet().removeIf(entry -> !predicate.test(entry.getValue().getRootDescriptor()));
return retainedEngines;
}
static | LauncherDiscoveryResult |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/buildextension/interceptor/bindings/AdditionalInterceptorBindingsPredicateTest.java | {
"start": 3327,
"end": 3411
} | interface ____ {
String[] value();
}
static | ToBeBindingWithBindingField |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteShareGroupsHandler.java | {
"start": 908,
"end": 1289
} | class ____ extends DeleteGroupsHandler {
public DeleteShareGroupsHandler(
LogContext logContext
) {
super(logContext, DeleteShareGroupsHandler.class);
}
@Override
public String apiName() {
return "deleteShareGroups";
}
@Override
public String displayName() {
return "DeleteShareGroups";
}
}
| DeleteShareGroupsHandler |
java | google__error-prone | core/src/main/java/com/google/errorprone/refaster/annotation/Matches.java | {
"start": 1182,
"end": 1565
} | class ____ {
* @BeforeTemplate
* public <E> List<E> before(@Matches(IsNonNullMatcher.class) E e) {
* return Collections.singletonList(e);
* }
*
* @AfterTemplate
* public <E> List<E> after(E e) {
* return ImmutableList.of(e);
* }
* }
* }</pre>
*/
@Target({ElementType.PARAMETER, ElementType.METHOD})
@Retention(RetentionPolicy.SOURCE)
public @ | SingletonList |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java | {
"start": 3192,
"end": 15151
} | class ____<T, R extends ConnectRecord<R>> implements Runnable {
private static final Logger log = LoggerFactory.getLogger(WorkerTask.class);
private static final String THREAD_NAME_PREFIX = "task-thread-";
private final TaskStatus.Listener statusListener;
private final StatusBackingStore statusBackingStore;
protected final ConnectorTaskId id;
protected final ClassLoader loader;
protected final Time time;
private final CountDownLatch shutdownLatch = new CountDownLatch(1);
private final TaskMetricsGroup taskMetricsGroup;
private volatile TargetState targetState;
private volatile boolean failed;
private volatile boolean stopping; // indicates whether the Worker has asked the task to stop
private volatile boolean cancelled; // indicates whether the Worker has cancelled the task (e.g. because of slow shutdown)
private final ErrorHandlingMetrics errorMetrics;
protected final RetryWithToleranceOperator<T> retryWithToleranceOperator;
protected final TransformationChain<T, R> transformationChain;
private final Supplier<List<ErrorReporter<T>>> errorReportersSupplier;
protected final Function<ClassLoader, LoaderSwap> pluginLoaderSwapper;
protected final PluginMetricsImpl pluginMetrics;
public WorkerTask(ConnectorTaskId id,
TaskStatus.Listener statusListener,
TargetState initialState,
ClassLoader loader,
ConnectMetrics connectMetrics,
ErrorHandlingMetrics errorMetrics,
RetryWithToleranceOperator<T> retryWithToleranceOperator,
TransformationChain<T, R> transformationChain,
Supplier<List<ErrorReporter<T>>> errorReportersSupplier,
Time time,
StatusBackingStore statusBackingStore,
TaskPluginsMetadata pluginsMetadata,
Function<ClassLoader, LoaderSwap> pluginLoaderSwapper) {
this.id = id;
this.taskMetricsGroup = new TaskMetricsGroup(this.id, connectMetrics, statusListener, pluginsMetadata);
this.errorMetrics = errorMetrics;
this.statusListener = taskMetricsGroup;
this.loader = loader;
this.targetState = initialState;
this.failed = false;
this.stopping = false;
this.cancelled = false;
this.taskMetricsGroup.recordState(this.targetState);
this.retryWithToleranceOperator = retryWithToleranceOperator;
this.transformationChain = transformationChain;
this.errorReportersSupplier = errorReportersSupplier;
this.time = time;
this.statusBackingStore = statusBackingStore;
this.pluginLoaderSwapper = pluginLoaderSwapper;
this.pluginMetrics = connectMetrics.taskPluginMetrics(id);
}
public ConnectorTaskId id() {
return id;
}
public ClassLoader loader() {
return loader;
}
public PluginMetrics pluginMetrics() {
return pluginMetrics;
}
/**
* Initialize the task for execution.
*
* @param taskConfig initial configuration
*/
public abstract void initialize(TaskConfig taskConfig);
private void triggerStop() {
synchronized (this) {
stopping = true;
// wakeup any threads that are waiting for unpause
this.notifyAll();
}
}
/**
* Stop this task from processing messages. This method does not block, it only triggers
* shutdown. Use {@link #awaitStop} to block until completion.
*/
public void stop() {
triggerStop();
}
/**
* Cancel this task. This won't actually stop it, but it will prevent the state from being
* updated when it eventually does shutdown.
*/
public void cancel() {
cancelled = true;
retryWithToleranceOperator.triggerStop();
}
/**
* Wait for this task to finish stopping.
*
* @param timeoutMs time in milliseconds to await stop
* @return true if successful, false if the timeout was reached
*/
public boolean awaitStop(long timeoutMs) {
try {
return shutdownLatch.await(timeoutMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
return false;
}
}
/**
* Remove all metrics published by this task.
*/
public void removeMetrics() {
// Close quietly here so that we can be sure to close everything even if one attempt fails
Utils.closeQuietly(taskMetricsGroup::close, "Task metrics group");
Utils.closeQuietly(errorMetrics, "Error handling metrics");
}
// Visible for testing
void doStart() {
retryWithToleranceOperator.reporters(errorReportersSupplier.get());
initializeAndStart();
statusListener.onStartup(id);
}
protected abstract void initializeAndStart();
protected abstract void execute();
protected abstract void close();
protected abstract String taskVersion();
protected boolean isFailed() {
return failed;
}
protected boolean isStopping() {
// The target state should never be STOPPED, but if things go wrong and it somehow is,
// we handle that identically to a request to shut down the task
return stopping || targetState == TargetState.STOPPED;
}
protected boolean isCancelled() {
return cancelled;
}
// Visible for testing
void doClose() {
try {
close();
} catch (Throwable t) {
log.error("{} Task threw an uncaught and unrecoverable exception during shutdown", this, t);
throw t;
} finally {
Utils.closeQuietly(transformationChain, "transformation chain");
Utils.closeQuietly(retryWithToleranceOperator, "retry operator");
}
}
private void doRun() throws InterruptedException {
try {
synchronized (this) {
if (isStopping())
return;
if (targetState == TargetState.PAUSED) {
onPause();
if (!awaitUnpause()) return;
}
}
doStart();
execute();
} catch (Throwable t) {
failed = true;
if (cancelled) {
log.warn("{} After being scheduled for shutdown, the orphan task threw an uncaught exception. A newer instance of this task might be already running", this, t);
} else if (isStopping()) {
log.warn("{} After being scheduled for shutdown, task threw an uncaught exception.", this, t);
} else {
log.error("{} Task threw an uncaught and unrecoverable exception. Task is being killed and will not recover until manually restarted", this, t);
throw t;
}
} finally {
doClose();
}
}
private void onShutdown() {
synchronized (this) {
triggerStop();
// if we were cancelled, skip the status update since the task may have already been
// started somewhere else
if (!cancelled)
statusListener.onShutdown(id);
}
}
protected void onFailure(Throwable t) {
synchronized (this) {
triggerStop();
// if we were cancelled, skip the status update since the task may have already been
// started somewhere else
if (!cancelled)
statusListener.onFailure(id, t);
}
}
protected synchronized void onPause() {
statusListener.onPause(id);
}
protected synchronized void onResume() {
statusListener.onResume(id);
}
@Override
public void run() {
// Clear all MDC parameters, in case this thread is being reused
LoggingContext.clear();
try (LoggingContext loggingContext = LoggingContext.forTask(id())) {
String savedName = Thread.currentThread().getName();
try {
Thread.currentThread().setName(THREAD_NAME_PREFIX + id);
doRun();
onShutdown();
} catch (Throwable t) {
onFailure(t);
if (t instanceof Error)
throw (Error) t;
} finally {
Thread.currentThread().setName(savedName);
shutdownLatch.countDown();
}
}
}
public boolean shouldPause() {
return this.targetState == TargetState.PAUSED;
}
/**
* Await task resumption.
*
* @return true if the task's target state is not paused, false if the task is shutdown before resumption
* @throws InterruptedException
*/
protected boolean awaitUnpause() throws InterruptedException {
synchronized (this) {
while (targetState == TargetState.PAUSED) {
if (isStopping())
return false;
this.wait();
}
return true;
}
}
public void transitionTo(TargetState state) {
synchronized (this) {
// Ignore the state change if we are stopping.
// This has the consequence that, if we ever transition to the STOPPED target state (which
// should never happen since whole point of that state is that it comes with a complete
// shutdown of all the tasks for the connector), we will never be able to transition out of it.
// Since part of transitioning to the STOPPED state is that we shut down the task and all of
// its resources (Kafka clients, SMTs, etc.), this is a reasonable way to do things; otherwise,
// we'd have to re-instantiate all of those resources to be able to resume (or even just pause)
// the task .
if (isStopping()) {
log.debug("{} Ignoring request to transition stopped task {} to state {}", this, id, state);
return;
}
if (targetState == TargetState.STOPPED)
log.warn("{} Received unexpected request to transition task {} to state {}; will shut down in response", this, id, TargetState.STOPPED);
this.targetState = state;
this.notifyAll();
}
}
/**
* Include this topic to the set of active topics for the connector that this worker task
* is running. This information is persisted in the status backing store used by this worker.
*
* @param topic the topic to mark as active for this connector
*/
protected void recordActiveTopic(String topic) {
if (statusBackingStore.getTopic(id.connector(), topic) != null) {
// The topic is already recorded as active. No further action is required.
return;
}
statusBackingStore.put(new TopicStatus(topic, id, time.milliseconds()));
}
/**
* Record that offsets have been committed.
*
* @param duration the length of time in milliseconds for the commit attempt to complete
*/
protected void recordCommitSuccess(long duration) {
taskMetricsGroup.recordCommit(duration, true);
}
/**
* Record that offsets have been committed.
*
* @param duration the length of time in milliseconds for the commit attempt to complete
*/
protected void recordCommitFailure(long duration) {
taskMetricsGroup.recordCommit(duration, false);
}
/**
* Record that a batch of records has been processed.
*
* @param size the number of records in the batch
*/
protected void recordBatch(int size) {
taskMetricsGroup.recordBatch(size);
}
TaskMetricsGroup taskMetricsGroup() {
return taskMetricsGroup;
}
static | WorkerTask |
java | spring-projects__spring-boot | configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/PropertyDescriptorResolver.java | {
"start": 7092,
"end": 9522
} | class ____ {
private final TypeElement type;
private final List<ExecutableElement> constructors;
private final List<ExecutableElement> boundConstructors;
Bindable(TypeElement type, List<ExecutableElement> constructors, List<ExecutableElement> boundConstructors) {
this.type = type;
this.constructors = constructors;
this.boundConstructors = boundConstructors;
}
TypeElement getType() {
return this.type;
}
boolean isConstructorBindingEnabled() {
return !this.boundConstructors.isEmpty();
}
ExecutableElement getBindConstructor() {
if (this.boundConstructors.isEmpty()) {
return findBoundConstructor();
}
if (this.boundConstructors.size() == 1) {
return this.boundConstructors.get(0);
}
return null;
}
private ExecutableElement findBoundConstructor() {
ExecutableElement boundConstructor = null;
for (ExecutableElement candidate : this.constructors) {
if (!candidate.getParameters().isEmpty()) {
if (boundConstructor != null) {
return null;
}
boundConstructor = candidate;
}
}
return boundConstructor;
}
static Bindable of(TypeElement type, MetadataGenerationEnvironment env) {
List<ExecutableElement> constructors = ElementFilter.constructorsIn(type.getEnclosedElements());
List<ExecutableElement> boundConstructors = getBoundConstructors(type, env, constructors);
return new Bindable(type, constructors, boundConstructors);
}
private static List<ExecutableElement> getBoundConstructors(TypeElement type, MetadataGenerationEnvironment env,
List<ExecutableElement> constructors) {
ExecutableElement bindConstructor = deduceBindConstructor(type, constructors, env);
if (bindConstructor != null) {
return Collections.singletonList(bindConstructor);
}
return constructors.stream().filter(env::hasConstructorBindingAnnotation).toList();
}
private static ExecutableElement deduceBindConstructor(TypeElement type, List<ExecutableElement> constructors,
MetadataGenerationEnvironment env) {
if (constructors.size() == 1) {
ExecutableElement candidate = constructors.get(0);
if (!candidate.getParameters().isEmpty() && !env.hasAutowiredAnnotation(candidate)) {
if (type.getNestingKind() == NestingKind.MEMBER
&& candidate.getModifiers().contains(Modifier.PRIVATE)) {
return null;
}
return candidate;
}
}
return null;
}
}
}
| Bindable |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/jdk8/CompletionStageConsumer.java | {
"start": 1251,
"end": 2904
} | class ____<T> extends CompletableFuture<T>
implements MaybeObserver<T>, SingleObserver<T>, CompletableObserver {
final AtomicReference<Disposable> upstream;
final boolean hasDefault;
final T defaultItem;
public CompletionStageConsumer(boolean hasDefault, T defaultItem) {
this.hasDefault = hasDefault;
this.defaultItem = defaultItem;
this.upstream = new AtomicReference<>();
}
@Override
public void onSubscribe(@NonNull Disposable d) {
DisposableHelper.setOnce(upstream, d);
}
@Override
public void onSuccess(@NonNull T t) {
clear();
complete(t);
}
@Override
public void onError(Throwable t) {
clear();
if (!completeExceptionally(t)) {
RxJavaPlugins.onError(t);
}
}
@Override
public void onComplete() {
if (hasDefault) {
complete(defaultItem);
} else {
completeExceptionally(new NoSuchElementException("The source was empty"));
}
}
void cancelUpstream() {
DisposableHelper.dispose(upstream);
}
void clear() {
upstream.lazySet(DisposableHelper.DISPOSED);
}
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
cancelUpstream();
return super.cancel(mayInterruptIfRunning);
}
@Override
public boolean complete(T value) {
cancelUpstream();
return super.complete(value);
}
@Override
public boolean completeExceptionally(Throwable ex) {
cancelUpstream();
return super.completeExceptionally(ex);
}
}
| CompletionStageConsumer |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/internal/AbstractSharedSessionContract.java | {
"start": 6331,
"end": 6966
} | class ____ implementations of {@link org.hibernate.SharedSessionContract} and
* {@link SharedSessionContractImplementor}. Intended for concrete implementations of
* {@link org.hibernate.Session} and {@link org.hibernate.StatelessSession}.
* <p>
* @implNote A {@code Session} or JPA {@code EntityManager} is a single-threaded object,
* which may not be called concurrently. Therefore, this implementation defines
* access to a number of instance state values in a manner that is not exactly
* thread-safe.
*
* @see SessionImpl
* @see StatelessSessionImpl
*
* @author Steve Ebersole
*/
abstract | for |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/ser/PropertyBuilder.java | {
"start": 1054,
"end": 7126
} | class ____ NON_DEFAULT, but NOT if it is the
* global default OR per-property override.
*/
protected Object _defaultBean;
/**
* Default inclusion mode for properties of the POJO for which
* properties are collected; possibly overridden on
* per-property basis. Combines global inclusion defaults and
* per-type (annotation and type-override) inclusion overrides.
*/
final protected JsonInclude.Value _defaultInclusion;
/**
* Marker flag used to indicate that "real" default values are to be used
* for properties, as per per-type value inclusion of type <code>NON_DEFAULT</code>
*/
final protected boolean _useRealPropertyDefaults;
public PropertyBuilder(SerializationConfig config, BeanDescription beanDesc)
{
_config = config;
_beanDesc = beanDesc;
// 08-Sep-2016, tatu: This gets tricky, with 3 levels of definitions:
// (a) global default inclusion
// (b) per-type default inclusion (from annotation or config overrides;
// config override having precedence)
// (c) per-property override (from annotation on specific property or
// config overrides per type of property;
// annotation having precedence)
//
// and not only requiring merging, but also considering special handling
// for NON_DEFAULT in case of (b) (vs (a) or (c))
JsonInclude.Value inclPerType = JsonInclude.Value.merge(
beanDesc.findPropertyInclusion(JsonInclude.Value.empty()),
config.getDefaultPropertyInclusion(beanDesc.getBeanClass(),
JsonInclude.Value.empty()));
_defaultInclusion = JsonInclude.Value.merge(config.getDefaultPropertyInclusion(),
inclPerType);
_useRealPropertyDefaults = inclPerType.getValueInclusion() == JsonInclude.Include.NON_DEFAULT;
_annotationIntrospector = _config.getAnnotationIntrospector();
}
/*
/**********************************************************************
/* Public API
/**********************************************************************
*/
public Annotations getClassAnnotations() {
return _beanDesc.getClassAnnotations();
}
/**
* @param contentTypeSer Optional explicit type information serializer
* to use for contained values (only used for properties that are
* of container type)
*/
protected BeanPropertyWriter buildWriter(SerializationContext ctxt,
BeanPropertyDefinition propDef, JavaType declaredType, ValueSerializer<?> ser,
TypeSerializer typeSer, TypeSerializer contentTypeSer,
AnnotatedMember am, boolean defaultUseStaticTyping)
{
// do we have annotation that forces type to use (to declared type or its super type)?
JavaType serializationType;
try {
serializationType = findSerializationType(am, defaultUseStaticTyping, declaredType);
} catch (DatabindException e) {
if (propDef == null) {
return ctxt.reportBadDefinition(declaredType, ClassUtil.exceptionMessage(e));
}
return ctxt.reportBadPropertyDefinition(_beanDesc, propDef, ClassUtil.exceptionMessage(e));
}
// Container types can have separate type serializers for content (value / element) type
if (contentTypeSer != null) {
// 04-Feb-2010, tatu: Let's force static typing for collection, if there is
// type information for contents. Should work well (for JAXB case); can be
// revisited if this causes problems.
if (serializationType == null) {
// serializationType = TypeFactory.type(am.getGenericType(), _beanDesc.getType());
serializationType = declaredType;
}
JavaType ct = serializationType.getContentType();
// Not exactly sure why, but this used to occur; better check explicitly:
if (ct == null) {
ctxt.reportBadPropertyDefinition(_beanDesc, propDef,
"serialization type "+serializationType+" has no content");
}
serializationType = serializationType.withContentTypeHandler(contentTypeSer);
ct = serializationType.getContentType();
}
Object valueToSuppress = null;
boolean suppressNulls = false;
// 12-Jul-2016, tatu: [databind#1256] Need to make sure we consider type refinement
JavaType actualType = (serializationType == null) ? declaredType : serializationType;
// 17-Mar-2017: [databind#1522] Allow config override per property type
AnnotatedMember accessor = propDef.getAccessor(); // lgtm [java/dereferenced-value-may-be-null]
if (accessor == null) {
// neither Setter nor ConstructorParameter are expected here
return ctxt.reportBadPropertyDefinition(_beanDesc, propDef,
"could not determine property type");
}
Class<?> rawPropertyType = accessor.getRawType();
// 17-Aug-2016, tatu: Default inclusion covers global default (for all types), as well
// as type-default for enclosing POJO. What we need, then, is per-type default (if any)
// for declared property type... and finally property annotation overrides
JsonInclude.Value inclV = _config.getDefaultInclusion(actualType.getRawClass(),
rawPropertyType, _defaultInclusion);
// property annotation override
inclV = inclV.withOverrides(propDef.findInclusion());
JsonInclude.Include inclusion = inclV.getValueInclusion();
if (inclusion == JsonInclude.Include.USE_DEFAULTS) { // should not occur but...
inclusion = JsonInclude.Include.ALWAYS;
}
switch (inclusion) {
case NON_DEFAULT:
// 11-Nov-2015, tatu: This is tricky because semantics differ between cases,
// so that if enclosing | defines |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/startup/FileStartupCondition.java | {
"start": 1065,
"end": 1799
} | class ____ implements StartupCondition {
private final File file;
public FileStartupCondition(String name) {
ObjectHelper.notNullOrEmpty(name, "File");
this.file = new File(name);
}
@Override
public String getName() {
return "File";
}
@Override
public String getWaitMessage() {
return "Waiting for file: " + file;
}
@Override
public String getFailureMessage() {
return "File: " + file + " does not exist";
}
protected boolean fileExists(File file) {
return file.exists();
}
@Override
public boolean canContinue(CamelContext camelContext) throws Exception {
return fileExists(file);
}
}
| FileStartupCondition |
java | alibaba__nacos | plugin/control/src/main/java/com/alibaba/nacos/plugin/control/tps/barrier/LocalSimpleCountRateCounter.java | {
"start": 4855,
"end": 5141
} | class ____ {
AtomicLong count = new AtomicLong();
AtomicLong interceptedCount = new AtomicLong();
@Override
public String toString() {
return "{" + count + "|" + interceptedCount + '}';
}
}
}
| SlotCountHolder |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/builditem/TransformedClassesBuildItem.java | {
"start": 1486,
"end": 1579
} | class ____ {
private final String className;
/**
* The | TransformedClass |
java | apache__camel | tests/camel-itest/src/test/java/org/apache/camel/itest/tx/JmsToHttpTXTest.java | {
"start": 1499,
"end": 2666
} | class ____ {
@RegisterExtension
public static JmsServiceExtension jmsServiceExtension = JmsServiceExtension.createExtension();
// use uri to refer to our mock
@EndpointInject("mock:JmsToHttpRoute")
MockEndpoint mock;
// use the spring id to refer to the endpoint we should send data to
// notice using this id we can setup the actual endpoint in spring XML
// and we can even use spring ${ } property in the spring XML
@EndpointInject("ref:data")
private ProducerTemplate template;
// the ok response to expect
private String ok = "<?xml version=\"1.0\"?><reply><status>ok</status></reply>";
@Test
void testSendToTXJms() throws Exception {
// we assume 2 rollbacks
mock.expectedMessageCount(2);
// use requestBody to force a InOut message exchange pattern ( = request/reply)
// will send and wait for a response
Object out = template.requestBody("<?xml version=\"1.0\"?><request><status id=\"123\"/></request>");
// compare response
assertEquals(ok, out);
// assert the mock is correct
mock.assertIsSatisfied();
}
}
| JmsToHttpTXTest |
java | apache__kafka | storage/src/main/java/org/apache/kafka/server/purgatory/DelayedRemoteListOffsets.java | {
"start": 2096,
"end": 10294
} | class
____ static final KafkaMetricsGroup METRICS_GROUP = new KafkaMetricsGroup("kafka.server", "DelayedRemoteListOffsetsMetrics");
static final Meter AGGREGATE_EXPIRATION_METER = METRICS_GROUP.newMeter("ExpiresPerSec", "requests", TimeUnit.SECONDS);
static final Map<TopicPartition, Meter> PARTITION_EXPIRATION_METERS = new ConcurrentHashMap<>();
private final int version;
private final Map<TopicPartition, ListOffsetsPartitionStatus> statusByPartition;
private final Consumer<TopicPartition> partitionOrException;
private final Consumer<Collection<ListOffsetsResponseData.ListOffsetsTopicResponse>> responseCallback;
public DelayedRemoteListOffsets(long delayMs,
int version,
Map<TopicPartition, ListOffsetsPartitionStatus> statusByPartition,
Consumer<TopicPartition> partitionOrException,
Consumer<Collection<ListOffsetsResponseData.ListOffsetsTopicResponse>> responseCallback) {
super(delayMs);
this.version = version;
this.statusByPartition = statusByPartition;
this.partitionOrException = partitionOrException;
this.responseCallback = responseCallback;
// Mark the status as completed, if there is no async task to track.
// If there is a task to track, then build the response as REQUEST_TIMED_OUT by default.
statusByPartition.forEach((topicPartition, status) -> {
status.completed(status.futureHolderOpt().isEmpty());
if (status.futureHolderOpt().isPresent()) {
status.responseOpt(Optional.of(buildErrorResponse(Errors.REQUEST_TIMED_OUT, topicPartition.partition())));
}
LOG.trace("Initial partition status for {} is {}", topicPartition, status);
});
}
/**
* Call-back to execute when a delayed operation gets expired and hence forced to complete.
*/
@Override
public void onExpiration() {
statusByPartition.forEach((topicPartition, status) -> {
if (!status.completed()) {
LOG.debug("Expiring list offset request for partition {} with status {}", topicPartition, status);
status.futureHolderOpt().ifPresent(futureHolder -> futureHolder.jobFuture().cancel(true));
recordExpiration(topicPartition);
}
});
}
/**
* Process for completing an operation; This function needs to be defined
* in subclasses and will be called exactly once in forceComplete()
*/
@Override
public void onComplete() {
Map<String, ListOffsetsResponseData.ListOffsetsTopicResponse> groupedByTopic = new HashMap<>();
statusByPartition.forEach((tp, status) -> {
ListOffsetsResponseData.ListOffsetsTopicResponse response = groupedByTopic.computeIfAbsent(tp.topic(), k ->
new ListOffsetsResponseData.ListOffsetsTopicResponse().setName(tp.topic()));
status.responseOpt().ifPresent(res -> response.partitions().add(res));
});
responseCallback.accept(groupedByTopic.values());
}
/**
* Try to complete the delayed operation by first checking if the operation
* can be completed by now. If yes execute the completion logic by calling
* forceComplete() and return true iff forceComplete returns true; otherwise return false
*/
@Override
public boolean tryComplete() {
AtomicBoolean completable = new AtomicBoolean(true);
statusByPartition.forEach((partition, status) -> {
if (!status.completed()) {
try {
partitionOrException.accept(partition);
} catch (ApiException e) {
status.futureHolderOpt().ifPresent(futureHolder -> {
futureHolder.jobFuture().cancel(false);
futureHolder.taskFuture().complete(new OffsetResultHolder.FileRecordsOrError(Optional.of(e), Optional.empty()));
});
}
status.futureHolderOpt().ifPresent(futureHolder -> {
if (futureHolder.taskFuture().isDone()) {
ListOffsetsResponseData.ListOffsetsPartitionResponse response;
try {
OffsetResultHolder.FileRecordsOrError taskFuture = futureHolder.taskFuture().get();
if (taskFuture.hasException()) {
response = buildErrorResponse(Errors.forException(taskFuture.exception().get()), partition.partition());
} else if (!taskFuture.hasTimestampAndOffset()) {
Errors error = status.maybeOffsetsError()
.map(e -> version >= 5 ? Errors.forException(e) : Errors.LEADER_NOT_AVAILABLE)
.orElse(Errors.NONE);
response = buildErrorResponse(error, partition.partition());
} else {
ListOffsetsResponseData.ListOffsetsPartitionResponse partitionResponse = buildErrorResponse(Errors.NONE, partition.partition());
FileRecords.TimestampAndOffset found = taskFuture.timestampAndOffset().get();
if (status.lastFetchableOffset().isPresent() && found.offset >= status.lastFetchableOffset().get()) {
if (status.maybeOffsetsError().isPresent()) {
Errors error = version >= 5 ? Errors.forException(status.maybeOffsetsError().get()) : Errors.LEADER_NOT_AVAILABLE;
partitionResponse.setErrorCode(error.code());
}
} else {
partitionResponse = new ListOffsetsResponseData.ListOffsetsPartitionResponse()
.setPartitionIndex(partition.partition())
.setErrorCode(Errors.NONE.code())
.setTimestamp(found.timestamp)
.setOffset(found.offset);
if (found.leaderEpoch.isPresent() && version >= 4) {
partitionResponse.setLeaderEpoch(found.leaderEpoch.get());
}
}
response = partitionResponse;
}
} catch (InterruptedException | ExecutionException e) {
response = buildErrorResponse(Errors.forException(e), partition.partition());
}
status.responseOpt(Optional.of(response));
status.completed(true);
}
completable.set(completable.get() && futureHolder.taskFuture().isDone());
});
}
});
if (completable.get()) {
return forceComplete();
} else {
return false;
}
}
private ListOffsetsResponseData.ListOffsetsPartitionResponse buildErrorResponse(Errors e, int partitionIndex) {
return new ListOffsetsResponseData.ListOffsetsPartitionResponse()
.setPartitionIndex(partitionIndex)
.setErrorCode(e.code())
.setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
.setOffset(ListOffsetsResponse.UNKNOWN_OFFSET);
}
private static void recordExpiration(TopicPartition partition) {
AGGREGATE_EXPIRATION_METER.mark();
PARTITION_EXPIRATION_METERS.computeIfAbsent(partition, tp -> METRICS_GROUP.newMeter("ExpiresPerSec",
"requests",
TimeUnit.SECONDS,
mkMap(mkEntry("topic", tp.topic()), mkEntry("partition", String.valueOf(tp.partition()))))).mark();
}
}
| private |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/writer/Writable.java | {
"start": 335,
"end": 565
} | interface ____ {
/**
* Passed to {@link Writable}, providing access to additional data specific to a given implementation of the model
* serialization mechanism.
*
* @author Gunnar Morling
*/
| Writable |
java | apache__spark | core/src/main/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorter.java | {
"start": 5382,
"end": 5488
} | class ____'s used instead of Java's Iterator in order to facilitate inlining.
*/
public static final | that |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/RepeatableContainersTests.java | {
"start": 8825,
"end": 8925
} | interface ____ {
StandardRepeatable[] value();
}
@NonRepeatable("a")
static | InvalidWrongArrayType |
java | quarkusio__quarkus | extensions/panache/hibernate-orm-panache-common/runtime/src/main/java/io/quarkus/hibernate/orm/panache/common/runtime/CommonPanacheQueryImpl.java | {
"start": 1894,
"end": 8533
} | interface ____ extends AutoCloseable {
@Override
void close();
}
private static final NonThrowingCloseable NO_FILTERS = new NonThrowingCloseable() {
@Override
public void close() {
}
};
private Object paramsArrayOrMap;
/**
* this is the HQL query expanded from the Panache-Query
*/
private String query;
/**
* this is the original Panache-Query, if any (can be null)
*/
private String originalQuery;
/**
* This is only used by the Spring Data JPA extension, due to Spring's Query annotation allowing a custom count query
* See https://docs.spring.io/spring-data/jpa/reference/jpa/query-methods.html#jpa.query-methods.at-query.native
* Otherwise we do not use this, and rely on ORM to generate count queries
*/
protected String customCountQueryForSpring;
private String orderBy;
private Session session;
private Page page;
private Long count;
private Range range;
private LockModeType lockModeType;
private Map<String, Object> hints;
private Map<String, Map<String, Object>> filters;
private Class<?> projectionType;
public CommonPanacheQueryImpl(Session session, String query, String originalQuery, String orderBy,
Object paramsArrayOrMap) {
this.session = session;
this.query = query;
this.originalQuery = originalQuery;
this.orderBy = orderBy;
this.paramsArrayOrMap = paramsArrayOrMap;
}
private CommonPanacheQueryImpl(CommonPanacheQueryImpl<?> previousQuery, String newQueryString,
String customCountQueryForSpring,
Class<?> projectionType) {
this.session = previousQuery.session;
this.query = newQueryString;
this.customCountQueryForSpring = customCountQueryForSpring;
this.orderBy = previousQuery.orderBy;
this.paramsArrayOrMap = previousQuery.paramsArrayOrMap;
this.page = previousQuery.page;
this.count = previousQuery.count;
this.range = previousQuery.range;
this.lockModeType = previousQuery.lockModeType;
this.hints = previousQuery.hints;
this.filters = previousQuery.filters;
this.projectionType = projectionType;
}
// Builder
public <T> CommonPanacheQueryImpl<T> project(Class<T> type) {
String selectQuery = query;
if (PanacheJpaUtil.isNamedQuery(query)) {
SelectionQuery<?> q = session.createNamedSelectionQuery(query.substring(1));
selectQuery = getQueryString(q);
}
String lowerCasedTrimmedQuery = PanacheJpaUtil.trimForAnalysis(selectQuery);
if (lowerCasedTrimmedQuery.startsWith("select new ")
|| lowerCasedTrimmedQuery.startsWith("select distinct new ")) {
throw new PanacheQueryException("Unable to perform a projection on a 'select [distinct]? new' query: " + query);
}
// If the query starts with a select clause, we pass it on to ORM which can handle that via a projection type
if (lowerCasedTrimmedQuery.startsWith("select ")) {
// I think projections do not change the result count, so we can keep the custom count query
return new CommonPanacheQueryImpl<>(this, query, customCountQueryForSpring, type);
}
// FIXME: this assumes the query starts with "FROM " probably?
// build select clause with a constructor expression
AtomicReference<String> cachedProjection = ProjectionQueryCache.get(type);
if (cachedProjection.get() == null) {
cachedProjection.set("SELECT " + getParametersFromClass(type, null));
}
String selectClause = cachedProjection.get();
// I think projections do not change the result count, so we can keep the custom count query
return new CommonPanacheQueryImpl<>(this, selectClause + selectQuery, customCountQueryForSpring, null);
}
private static StringBuilder getParametersFromClass(Class<?> type, String parentParameter) {
StringBuilder selectClause = new StringBuilder();
Constructor<?> constructor = getConstructor(type);
selectClause.append("new ").append(type.getName()).append(" (");
String parametersListStr = Stream.of(constructor.getParameters())
.map(parameter -> getParameterName(type, parentParameter, parameter))
.collect(Collectors.joining(","));
selectClause.append(parametersListStr);
selectClause.append(") ");
return selectClause;
}
private static Constructor<?> getConstructor(Class<?> type) {
Constructor<?>[] typeConstructors = type.getDeclaredConstructors();
//We start to look for constructors with @ProjectedConstructor
for (Constructor<?> typeConstructor : typeConstructors) {
if (typeConstructor.isAnnotationPresent(ProjectedConstructor.class)) {
return typeConstructor;
}
}
//If didn't find anything early,
//we try to find a constructor with parameters annotated with @ProjectedFieldName
for (Constructor<?> typeConstructor : typeConstructors) {
for (Parameter parameter : typeConstructor.getParameters()) {
if (parameter.isAnnotationPresent(ProjectedFieldName.class)) {
return typeConstructor;
}
}
}
//We fall back to the first constructor that has parameters
for (Constructor<?> typeConstructor : typeConstructors) {
Parameter[] parameters = typeConstructor.getParameters();
if (parameters.length == 0) {
continue;
}
return typeConstructor;
}
//If everything fails, we return the first available constructor
return typeConstructors[0];
}
private static String getParameterName(Class<?> parentType, String parentParameter, Parameter parameter) {
String parameterName;
// Check if constructor param is annotated with ProjectedFieldName
if (hasProjectedFieldName(parameter)) {
parameterName = getNameFromProjectedFieldName(parameter);
} else if (!parameter.isNamePresent()) {
throw new PanacheQueryException(
"Your application must be built with parameter names, this should be the default if" +
" using Quarkus project generation. Check the Maven or Gradle compiler configuration to include '-parameters'.");
} else {
// Check if | NonThrowingCloseable |
java | quarkusio__quarkus | integration-tests/cache/src/test/java/io/quarkus/it/cache/RestClientITCase.java | {
"start": 115,
"end": 169
} | class ____ extends RestClientTestCase {
}
| RestClientITCase |
java | elastic__elasticsearch | x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MockClusterAlertScriptEngine.java | {
"start": 1224,
"end": 1486
} | class ____ extends MockPainlessScriptEngine {
/**
* The plugin that creates this mock script engine. Overrides the original mock engine to inject this
* implementation instead of the parent class.
*/
public static | MockClusterAlertScriptEngine |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.