language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | square__javapoet | src/test/java/com/squareup/javapoet/TypeSpecTest.java | {
"start": 46810,
"end": 47346
} | class ____ {\n"
+ "}\n");
}
@Test public void varargs() throws Exception {
TypeSpec taqueria = TypeSpec.classBuilder("Taqueria")
.addMethod(MethodSpec.methodBuilder("prepare")
.addParameter(int.class, "workers")
.addParameter(Runnable[].class, "jobs")
.varargs()
.build())
.build();
assertThat(toString(taqueria)).isEqualTo(""
+ "package com.squareup.tacos;\n"
+ "\n"
+ "import java.lang.Runnable;\n"
+ "\n"
+ " | Menu |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/exceptions/TablePartitionedException.java | {
"start": 1063,
"end": 1515
} | class ____ extends Exception {
private static final String MSG = "Table %s in catalog %s is partitioned.";
public TablePartitionedException(String catalogName, ObjectPath tablePath) {
this(catalogName, tablePath, null);
}
public TablePartitionedException(String catalogName, ObjectPath tablePath, Throwable cause) {
super(String.format(MSG, tablePath.getFullName(), catalogName), cause);
}
}
| TablePartitionedException |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/ByteAssertBaseTest.java | {
"start": 742,
"end": 831
} | class ____ {@link ByteAssert} tests.
*
* @author Olivier Michallat
*/
public abstract | for |
java | quarkusio__quarkus | extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/boot/xml/RecordableXmlMapping.java | {
"start": 689,
"end": 2824
} | class ____ {
// The following two properties are mutually exclusive: exactly one of them is non-null.
private final JaxbEntityMappingsImpl ormXmlRoot;
private final JaxbHbmHibernateMapping hbmXmlRoot;
private final SourceType originType;
private final String originName;
public static RecordableXmlMapping create(Binding<? extends JaxbBindableMappingDescriptor> binding) {
JaxbBindableMappingDescriptor root = binding.getRoot();
Origin origin = binding.getOrigin();
if (root instanceof JaxbEntityMappingsImpl) {
return new RecordableXmlMapping((JaxbEntityMappingsImpl) root, null, origin.getType(), origin.getName());
} else if (root instanceof JaxbHbmHibernateMapping) {
return new RecordableXmlMapping(null, (JaxbHbmHibernateMapping) root, origin.getType(), origin.getName());
} else {
throw new IllegalArgumentException("Unsupported mapping file root (unrecognized type): " + root);
}
}
@RecordableConstructor
public RecordableXmlMapping(JaxbEntityMappingsImpl ormXmlRoot, JaxbHbmHibernateMapping hbmXmlRoot, SourceType originType,
String originName) {
this.ormXmlRoot = ormXmlRoot;
this.hbmXmlRoot = hbmXmlRoot;
this.originType = originType;
this.originName = originName;
}
@Override
public String toString() {
return "RecordableXmlMapping{" +
"originName='" + originName + '\'' +
'}';
}
public JaxbEntityMappingsImpl getOrmXmlRoot() {
return ormXmlRoot;
}
public JaxbHbmHibernateMapping getHbmXmlRoot() {
return hbmXmlRoot;
}
public SourceType getOriginType() {
return originType;
}
public String getOriginName() {
return originName;
}
public Binding<?> toHibernateOrmBinding() {
Origin origin = new Origin(originType, originName);
if (ormXmlRoot != null) {
return new Binding<>(ormXmlRoot, origin);
} else {
return new Binding<>(hbmXmlRoot, origin);
}
}
}
| RecordableXmlMapping |
java | reactor__reactor-core | reactor-test/src/main/java/reactor/test/DefaultStepVerifierBuilder.java | {
"start": 76686,
"end": 77201
} | class ____<T> extends TaskEvent<T> {
final SubscriptionEvent<T> delegate;
SubscriptionTaskEvent(SubscriptionEvent<T> subscriptionEvent) {
super(null, subscriptionEvent.getDescription());
this.delegate = subscriptionEvent;
}
@Override
void run(DefaultVerifySubscriber<T> parent) throws Exception {
if (delegate.isTerminal()) {
parent.doCancel();
} else {
delegate.consume(Objects.requireNonNull(parent.get(), "subscription event null!"));
}
}
}
static final | SubscriptionTaskEvent |
java | apache__avro | lang/java/ipc/src/test/java/org/apache/avro/TestProtocolGeneric.java | {
"start": 2177,
"end": 9129
} | class ____ extends GenericResponder {
public TestResponder() {
super(PROTOCOL);
}
public Object respond(Message message, Object request) throws AvroRemoteException {
GenericRecord params = (GenericRecord) request;
if ("hello".equals(message.getName())) {
LOG.info("hello: " + params.get("greeting"));
return new Utf8("goodbye");
}
if ("echo".equals(message.getName())) {
Object record = params.get("record");
LOG.info("echo: " + record);
return record;
}
if ("echoBytes".equals(message.getName())) {
Object data = params.get("data");
LOG.info("echoBytes: " + data);
return data;
}
if ("error".equals(message.getName())) {
if (throwUndeclaredError)
throw new RuntimeException("foo");
GenericRecord error = new GenericData.Record(PROTOCOL.getType("TestError"));
error.put("message", new Utf8("an error"));
throw new AvroRemoteException(error);
}
throw new AvroRuntimeException("unexpected message: " + message.getName());
}
}
protected static SocketServer server;
protected static Transceiver client;
protected static GenericRequestor requestor;
@BeforeEach
public void testStartServer() throws Exception {
if (server != null)
return;
server = new SocketServer(new TestResponder(), new InetSocketAddress(0));
server.start();
client = new SocketTransceiver(new InetSocketAddress(server.getPort()));
requestor = new GenericRequestor(PROTOCOL, client);
}
@Test
void hello() throws Exception {
GenericRecord params = new GenericData.Record(PROTOCOL.getMessages().get("hello").getRequest());
params.put("greeting", new Utf8("bob"));
Utf8 response = (Utf8) requestor.request("hello", params);
assertEquals(new Utf8("goodbye"), response);
}
@Test
void echo() throws Exception {
GenericRecord record = new GenericData.Record(PROTOCOL.getType("TestRecord"));
record.put("name", new Utf8("foo"));
record.put("kind", new GenericData.EnumSymbol(PROTOCOL.getType("Kind"), "BAR"));
record.put("hash",
new GenericData.Fixed(PROTOCOL.getType("MD5"), new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 }));
GenericRecord params = new GenericData.Record(PROTOCOL.getMessages().get("echo").getRequest());
params.put("record", record);
Object echoed = requestor.request("echo", params);
assertEquals(record, echoed);
}
@Test
void echoBytes() throws Exception {
Random random = new Random();
int length = random.nextInt(1024 * 16);
GenericRecord params = new GenericData.Record(PROTOCOL.getMessages().get("echoBytes").getRequest());
ByteBuffer data = ByteBuffer.allocate(length);
random.nextBytes(data.array());
data.flip();
params.put("data", data);
Object echoed = requestor.request("echoBytes", params);
assertEquals(data, echoed);
}
@Test
void error() throws Exception {
GenericRecord params = new GenericData.Record(PROTOCOL.getMessages().get("error").getRequest());
AvroRemoteException error = null;
try {
requestor.request("error", params);
} catch (AvroRemoteException e) {
error = e;
}
assertNotNull(error);
assertEquals("an error", ((GenericRecord) error.getValue()).get("message").toString());
}
@Test
void undeclaredError() throws Exception {
this.throwUndeclaredError = true;
RuntimeException error = null;
GenericRecord params = new GenericData.Record(PROTOCOL.getMessages().get("error").getRequest());
try {
requestor.request("error", params);
} catch (RuntimeException e) {
error = e;
} finally {
this.throwUndeclaredError = false;
}
assertNotNull(error);
assertTrue(error.toString().contains("foo"));
}
/**
* Construct and use a different protocol whose "hello" method has an extra
* argument to check that schema is sent to parse request.
*/
@Test
public void handshake() throws Exception {
Protocol protocol = new Protocol("Simple", "org.apache.avro.test");
List<Field> fields = new ArrayList<>();
fields.add(new Schema.Field("extra", Schema.create(Schema.Type.BOOLEAN), null, null));
fields.add(new Schema.Field("greeting", Schema.create(Schema.Type.STRING), null, null));
Protocol.Message message = protocol.createMessage("hello", null /* doc */, new LinkedHashMap<String, String>(),
Schema.createRecord(fields), Schema.create(Schema.Type.STRING), Schema.createUnion(new ArrayList<>()));
protocol.getMessages().put("hello", message);
try (Transceiver t = new SocketTransceiver(new InetSocketAddress(server.getPort()))) {
GenericRequestor r = new GenericRequestor(protocol, t);
GenericRecord params = new GenericData.Record(message.getRequest());
params.put("extra", Boolean.TRUE);
params.put("greeting", new Utf8("bob"));
Utf8 response = (Utf8) r.request("hello", params);
assertEquals(new Utf8("goodbye"), response);
}
}
/**
* Construct and use a different protocol whose "echo" response has an extra
* field to check that correct schema is used to parse response.
*/
@Test
public void responseChange() throws Exception {
List<Field> fields = new ArrayList<>();
for (Field f : PROTOCOL.getType("TestRecord").getFields())
fields.add(new Field(f.name(), f.schema(), null, null));
fields.add(new Field("extra", Schema.create(Schema.Type.BOOLEAN), null, true));
Schema record = Schema.createRecord("TestRecord", null, "org.apache.avro.test", false);
record.setFields(fields);
Protocol protocol = new Protocol("Simple", "org.apache.avro.test");
List<Field> params = new ArrayList<>();
params.add(new Field("record", record, null, null));
Protocol.Message message = protocol.createMessage("echo", null, new LinkedHashMap<String, String>(),
Schema.createRecord(params), record, Schema.createUnion(new ArrayList<>()));
protocol.getMessages().put("echo", message);
try (Transceiver t = new SocketTransceiver(new InetSocketAddress(server.getPort()))) {
GenericRequestor r = new GenericRequestor(protocol, t);
GenericRecord args = new GenericData.Record(message.getRequest());
GenericRecord rec = new GenericData.Record(record);
rec.put("name", new Utf8("foo"));
rec.put("kind", new GenericData.EnumSymbol(PROTOCOL.getType("Kind"), "BAR"));
rec.put("hash", new GenericData.Fixed(PROTOCOL.getType("MD5"),
new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 }));
rec.put("extra", Boolean.TRUE);
args.put("record", rec);
GenericRecord response = (GenericRecord) r.request("echo", args);
assertEquals(rec, response);
}
}
@AfterAll
public static void testStopServer() throws Exception {
client.close();
server.close();
}
}
| TestResponder |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/guava/LinkedListMultimapTest.java | {
"start": 257,
"end": 593
} | class ____ extends TestCase {
public void test_for_multimap() throws Exception {
LinkedListMultimap map = LinkedListMultimap.create();
map.put("name", "a");
map.put("name", "b");
String json = JSON.toJSONString(map);
assertEquals("{\"name\":[\"a\",\"b\"]}", json);
}
}
| LinkedListMultimapTest |
java | google__guava | android/guava/src/com/google/common/cache/LocalCache.java | {
"start": 45551,
"end": 46520
} | class ____<K, V> extends SoftReference<V>
implements ValueReference<K, V> {
final ReferenceEntry<K, V> entry;
SoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) {
super(referent, queue);
this.entry = entry;
}
@Override
public int getWeight() {
return 1;
}
@Override
public ReferenceEntry<K, V> getEntry() {
return entry;
}
@Override
public void notifyNewValue(V newValue) {}
@Override
public ValueReference<K, V> copyFor(
ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
return new SoftValueReference<>(queue, value, entry);
}
@Override
public boolean isLoading() {
return false;
}
@Override
public boolean isActive() {
return true;
}
@Override
public V waitForValue() {
return get();
}
}
/** References a strong value. */
private static | SoftValueReference |
java | quarkusio__quarkus | extensions/micrometer/runtime/src/main/java/io/quarkus/micrometer/runtime/binder/kafka/KafkaStreamsEventObserver.java | {
"start": 706,
"end": 2139
} | class ____ {
private static final Logger log = Logger.getLogger(KafkaStreamsEventObserver.class);
final MeterRegistry registry = Metrics.globalRegistry;
KafkaStreamsMetrics kafkaStreamsMetrics;
/**
* Manage bind/close of KafkaStreamsMetrics for the specified KafkaStreams client.
* If the kafkaStreams has not been seen before, it will be bound to the
* Micrometer registry and instrumented using a Kafka MeterBinder.
* If the kafkaStreams has been seen before, the MeterBinder will be closed.
*
* @param kafkaStreams Observed KafkaStreams instance
*/
public synchronized void kafkaStreamsCreated(@Observes KafkaStreams kafkaStreams) {
if (kafkaStreamsMetrics == null) {
kafkaStreamsMetrics = new KafkaStreamsMetrics(kafkaStreams);
try {
kafkaStreamsMetrics.bindTo(registry);
} catch (Throwable t) {
log.warnf(t, "Unable to register metrics for KafkaStreams %s", kafkaStreams);
tryToClose(kafkaStreamsMetrics);
}
} else {
tryToClose(kafkaStreamsMetrics);
}
}
void onStop(@Observes ShutdownEvent event) {
tryToClose(kafkaStreamsMetrics);
}
void tryToClose(AutoCloseable c) {
try {
c.close();
} catch (Exception e) {
// intentionally empty
}
}
}
| KafkaStreamsEventObserver |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/integer/AtomicIntegerAssert_customComparator_Test.java | {
"start": 861,
"end": 1094
} | class ____ {
@Test
void should_honor_custom_comparator() {
assertThat(new AtomicInteger(1)).usingComparator(new AbsValueComparator<AtomicInteger>()).hasValueLessThanOrEqualTo(-1);
}
}
| AtomicIntegerAssert_customComparator_Test |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java | {
"start": 64041,
"end": 64107
} | interface ____ all of the Reader options
*/
public static | for |
java | apache__maven | its/core-it-support/core-it-plugins/maven-it-plugin-dependency-resolution/src/main/java/org/apache/maven/plugin/coreit/AbstractDependencyMojo.java | {
"start": 5223,
"end": 5918
} | class ____: " + file);
BufferedWriter writer = null;
try {
file.getParentFile().mkdirs();
writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "UTF-8"));
if (classPath != null) {
for (Object aClassPath : classPath) {
String element = aClassPath.toString();
writer.write(stripLeadingDirs(element, significantPathLevels));
writer.newLine();
getLog().info("[MAVEN-CORE-IT-LOG] " + element);
}
}
} catch (IOException e) {
throw new MojoExecutionException("Failed to write | path |
java | quarkusio__quarkus | independent-projects/qute/debug/src/test/java/io/quarkus/qute/debug/client/TracingMessageConsumer.java | {
"start": 9911,
"end": 10613
} | class ____ {
final String method;
final Instant start;
public RequestMetadata(String method, Instant start) {
this.method = method;
this.start = start;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
RequestMetadata that = (RequestMetadata) o;
return Objects.equals(method, that.method) && Objects.equals(start, that.start);
}
@Override
public int hashCode() {
return Objects.hash(method, start);
}
}
}
| RequestMetadata |
java | alibaba__nacos | common/src/main/java/com/alibaba/nacos/common/http/client/handler/RestResultResponseHandler.java | {
"start": 1111,
"end": 2001
} | class ____<T> extends AbstractResponseHandler<T> {
@Override
@SuppressWarnings("unchecked")
public HttpRestResult<T> convertResult(HttpClientResponse response, Type responseType) throws Exception {
final Header headers = response.getHeaders();
T extractBody = JacksonUtils.toObj(response.getBody(), responseType);
HttpRestResult<T> httpRestResult = convert((RestResult<T>) extractBody);
httpRestResult.setHeader(headers);
return httpRestResult;
}
private static <T> HttpRestResult<T> convert(RestResult<T> restResult) {
HttpRestResult<T> httpRestResult = new HttpRestResult<>();
httpRestResult.setCode(restResult.getCode());
httpRestResult.setData(restResult.getData());
httpRestResult.setMessage(restResult.getMessage());
return httpRestResult;
}
}
| RestResultResponseHandler |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestConfTest.java | {
"start": 1065,
"end": 6923
} | class ____ {
@Test
public void testEmptyConfiguration() {
String conf = "<configuration/>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertTrue(errors.isEmpty());
}
@Test
public void testValidConfiguration() {
String conf = "<configuration>\n"
+ "<property>\n"
+ "<name>foo</name>\n"
+ "<value>bar</value>\n"
+ "</property>\n"
+ "</configuration>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertTrue(errors.isEmpty());
}
@Test
public void testSourceDuplicationIsValid() {
String conf = "<configuration>\n"
+ "<property source='a'>\n"
+ "<name>foo</name>\n"
+ "<value>bar</value>\n"
+ "<source>b</source>\n"
+ "<source>c</source>\n"
+ "</property>\n"
+ "</configuration>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertTrue(errors.isEmpty());
}
@Test
public void testEmptyInput() {
String conf = "";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertEquals(1, errors.size());
assertTrue(errors.get(0).startsWith("bad conf file: "));
}
@Test
public void testInvalidFormat() {
String conf = "<configuration>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertEquals(1, errors.size());
assertTrue(errors.get(0).startsWith("bad conf file: "));
}
@Test
public void testRootElementNotConfiguration() {
String conf = "<configurations/>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertEquals(1, errors.size());
assertEquals("bad conf file: top-level element not <configuration>", errors.get(0));
}
@Test
public void testSubElementNotProperty() {
String conf = "<configuration>\n"
+ "<foo/>\n"
+ "</configuration>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertEquals(1, errors.size());
assertEquals("Line 2: element not <property>", errors.get(0));
}
@Test
public void testPropertyHasNoName() {
String conf ="<configuration>\n"
+ "<property>\n"
+ "<value>foo</value>\n"
+ "</property>\n"
+ "</configuration>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertEquals(1, errors.size());
assertEquals("Line 2: <property> has no <name>", errors.get(0));
}
@Test
public void testPropertyHasEmptyName() {
String conf = "<configuration>\n"
+ "<property>\n"
+ "<name></name>\n"
+ "<value>foo</value>\n"
+ "</property>\n"
+ "</configuration>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertEquals(1, errors.size());
assertEquals("Line 2: <property> has an empty <name>", errors.get(0));
}
@Test
public void testPropertyHasNoValue() {
String conf ="<configuration>\n"
+ "<property>\n"
+ "<name>foo</name>\n"
+ "</property>\n"
+ "</configuration>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertEquals(1, errors.size());
assertEquals("Line 2: <property> has no <value>", errors.get(0));
}
@Test
public void testPropertyHasEmptyValue() {
String conf = "<configuration>\n"
+ "<property>\n"
+ "<name>foo</name>\n"
+ "<value></value>\n"
+ "</property>\n"
+ "</configuration>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertTrue(errors.isEmpty());
}
@Test
public void testPropertyHasDuplicatedAttributeAndElement() {
String conf = "<configuration>\n"
+ "<property name='foo'>\n"
+ "<name>bar</name>\n"
+ "<value>baz</value>\n"
+ "</property>\n"
+ "</configuration>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertEquals(1, errors.size());
assertEquals("Line 2: <property> has duplicated <name>s", errors.get(0));
}
@Test
public void testPropertyHasDuplicatedElements() {
String conf = "<configuration>\n"
+ "<property>\n"
+ "<name>foo</name>\n"
+ "<name>bar</name>\n"
+ "<value>baz</value>\n"
+ "</property>\n"
+ "</configuration>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertEquals(1, errors.size());
assertEquals("Line 2: <property> has duplicated <name>s", errors.get(0));
}
@Test
public void testDuplicatedProperties() {
String conf = "<configuration>\n"
+ "<property>\n"
+ "<name>foo</name>\n"
+ "<value>bar</value>\n"
+ "</property>\n"
+ "<property>\n"
+ "<name>foo</name>\n"
+ "<value>baz</value>\n"
+ "</property>\n"
+ "</configuration>";
ByteArrayInputStream bais = new ByteArrayInputStream(conf.getBytes());
List<String> errors = ConfTest.checkConf(bais);
assertEquals(1, errors.size());
assertEquals("Line 2, 6: duplicated <property>s for foo", errors.get(0));
}
}
| TestConfTest |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/info/SslInfo.java | {
"start": 3806,
"end": 4663
} | class ____ {
private final String alias;
private final List<CertificateInfo> certificates;
CertificateChainInfo(KeyStore keyStore, String alias) {
this.alias = alias;
this.certificates = extractCertificates(keyStore, alias);
}
private List<CertificateInfo> extractCertificates(KeyStore keyStore, String alias) {
try {
Certificate[] certificates = keyStore.getCertificateChain(alias);
return (!ObjectUtils.isEmpty(certificates))
? Arrays.stream(certificates).map(CertificateInfo::new).toList() : Collections.emptyList();
}
catch (KeyStoreException ex) {
return Collections.emptyList();
}
}
public String getAlias() {
return this.alias;
}
public List<CertificateInfo> getCertificates() {
return this.certificates;
}
}
/**
* Info about a certificate.
*/
public final | CertificateChainInfo |
java | apache__logging-log4j2 | log4j-perf-test/src/main/java/org/apache/logging/log4j/perf/appender/StringAppender.java | {
"start": 1474,
"end": 3634
} | class ____ extends AbstractAppender {
private String message;
public StringAppender(final String name, final Filter filter, final Layout<? extends Serializable> layout) {
super(name, filter, layout, true, Property.EMPTY_ARRAY);
if (layout != null && !(layout instanceof SerializedLayout)) {
final byte[] bytes = layout.getHeader();
if (bytes != null) {
message = new String(bytes);
}
}
}
@Override
public void append(final LogEvent event) {
final Layout<? extends Serializable> layout = getLayout();
if (layout instanceof SerializedLayout) {
final byte[] header = layout.getHeader();
final byte[] content = layout.toByteArray(event);
final byte[] record = new byte[header.length + content.length];
System.arraycopy(header, 0, record, 0, header.length);
System.arraycopy(content, 0, record, header.length, content.length);
message = new String(record);
} else {
message = new String(layout.toByteArray(event));
}
}
@Override
public boolean stop(final long timeout, final TimeUnit timeUnit) {
setStopped();
return true;
}
public StringAppender clear() {
message = null;
return this;
}
public String getMessage() {
return message;
}
public static StringAppender createAppender(
final String name, final Layout<? extends Serializable> layout, final Filter filter) {
return new StringAppender(name, filter, layout);
}
/**
* Gets the named StringAppender if it has been registered.
*
* @param name the name of the ListAppender
* @return the named StringAppender or {@code null} if it does not exist
*/
public static StringAppender getStringAppender(final String name) {
return ((StringAppender)
(LoggerContext.getContext(false)).getConfiguration().getAppender(name));
}
@Override
public String toString() {
return "StringAppender message=" + message;
}
}
| StringAppender |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobModelSnapshotsUpgradeStatsAction.java | {
"start": 9891,
"end": 12003
} | class ____ {
private final String jobId;
private final String snapshotId;
private SnapshotUpgradeState upgradeState;
private DiscoveryNode node;
private String assignmentExplanation;
public Builder(String jobId, String snapshotId) {
this.jobId = jobId;
this.snapshotId = snapshotId;
}
public String getJobId() {
return jobId;
}
public String getSnapshotId() {
return snapshotId;
}
public Response.JobModelSnapshotUpgradeStats.Builder setUpgradeState(SnapshotUpgradeState upgradeState) {
this.upgradeState = Objects.requireNonNull(upgradeState);
return this;
}
public Response.JobModelSnapshotUpgradeStats.Builder setNode(DiscoveryNode node) {
this.node = node;
return this;
}
public Response.JobModelSnapshotUpgradeStats.Builder setAssignmentExplanation(String assignmentExplanation) {
this.assignmentExplanation = assignmentExplanation;
return this;
}
public Response.JobModelSnapshotUpgradeStats build() {
return new Response.JobModelSnapshotUpgradeStats(jobId, snapshotId, upgradeState, node, assignmentExplanation);
}
}
}
public Response(QueryPage<Response.JobModelSnapshotUpgradeStats> upgradeStats) {
super(upgradeStats);
}
public Response(StreamInput in) throws IOException {
super(in);
}
public QueryPage<Response.JobModelSnapshotUpgradeStats> getResponse() {
return getResources();
}
@Override
protected Reader<Response.JobModelSnapshotUpgradeStats> getReader() {
return Response.JobModelSnapshotUpgradeStats::new;
}
}
}
| Builder |
java | google__guava | android/guava-tests/test/com/google/common/collect/BenchmarkHelpers.java | {
"start": 11752,
"end": 12384
} | enum ____ implements InternerImplEnum {
WeakInternerImpl {
@Override
public <E> Interner<E> create(Collection<E> contents) {
Interner<E> interner = Interners.newWeakInterner();
for (E e : contents) {
E unused = interner.intern(e);
}
return interner;
}
},
StrongInternerImpl {
@Override
public <E> Interner<E> create(Collection<E> contents) {
Interner<E> interner = Interners.newStrongInterner();
for (E e : contents) {
E unused = interner.intern(e);
}
return interner;
}
};
}
public | InternerImpl |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/search/arguments/CreateArgs.java | {
"start": 2429,
"end": 18237
} | class ____<K, V> {
private final CreateArgs<K, V> instance = new CreateArgs<>();
/**
* Set the {@link TargetType} type for the index. Defaults to {@link TargetType#HASH}.
*
* @param targetType the target type
* @return the instance of the current {@link Builder} for the purpose of method chaining
*/
public Builder<K, V> on(TargetType targetType) {
instance.on = Optional.of(targetType);
return this;
}
/**
* Add a prefix to the index. You can add several prefixes to index. Default setting is * (all keys).
*
* @param prefix the prefix
* @return the instance of the current {@link Builder} for the purpose of method chaining
*/
public Builder<K, V> withPrefix(K prefix) {
instance.prefixes.add(prefix);
return this;
}
/**
* Add a list of prefixes to the index. You can add several prefixes to index. Default setting is * (all keys).
*
* @param prefixes a {@link List} of prefixes
* @return the instance of the current {@link Builder} for the purpose of method chaining
*/
public Builder<K, V> withPrefixes(List<K> prefixes) {
instance.prefixes.addAll(prefixes);
return this;
}
/**
* Set a filter for the index. Default setting is to have no filter.
* <p/>
* It is possible to use @__key to access the key that was just added/changed. A field can be used to set field name by
* passing 'FILTER @indexName=="myindexname"'.
*
* @param filter a filter expression with the full RediSearch aggregation expression language
* @return the instance of the current {@link Builder} for the purpose of method chaining
* @see <a href="https://redis.io/docs/latest/develop/interact/search-and-query/query/">RediSearch Query</a>
*/
public Builder<K, V> filter(V filter) {
instance.filter = Optional.of(filter);
return this;
}
/**
* Set the default language for the documents in the index. The default setting is English.
*
* @param language the default language
* @return the instance of the current {@link Builder} for the purpose of method chaining
*/
public Builder<K, V> defaultLanguage(DocumentLanguage language) {
instance.defaultLanguage = Optional.of(language);
return this;
}
/**
* Set the field that contains the language setting for the documents in the index. The default setting is to have no
* language field.
*
* @param field the language field
* @return the instance of the current {@link Builder} for the purpose of method chaining
* @see <a href=
* "https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/stemming/">Stemming</a>
*/
public Builder<K, V> languageField(K field) {
instance.languageField = Optional.of(field);
return this;
}
/**
* Set the default score for the documents in the index. The default setting is 1.0.
*
* @param score the default score
* @return the instance of the current {@link Builder} for the purpose of method chaining
* @see <a href="https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/scoring/">Scoring</a>
*/
public Builder<K, V> defaultScore(double score) {
instance.defaultScore = OptionalDouble.of(score);
return this;
}
/**
* Set the field that contains the score setting for the documents in the index. The default setting is a score of 1.0.
*
* @param field the score field
* @return the instance of the current {@link Builder} for the purpose of method chaining
* @see <a href="https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/scoring/">Scoring</a>
*/
public Builder<K, V> scoreField(K field) {
instance.scoreField = Optional.of(field);
return this;
}
/**
* Set the field that contains the payload setting for the documents in the index. The default setting is to have no
* payload field.
* <p/>
* This should be a document attribute that you use as a binary safe payload string to the document that can be
* evaluated at query time by a custom scoring function or retrieved to the client
*
* @param field the payload field
* @return the instance of the current {@link Builder} for the purpose of method chaining
* @see <a href="https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/scoring/">Scoring</a>
*/
public Builder<K, V> payloadField(K field) {
instance.payloadField = Optional.of(field);
return this;
}
/**
* Set the maximum number of text fields in the index. The default setting is to have no limit.
* <p/>
* Forces RediSearch to encode indexes as if there were more than 32 text attributes, which allows you to add additional
* attributes (beyond 32) using FT.ALTER. For efficiency, RediSearch encodes indexes differently if they are created
* with less than 32 text attributes.
*
* @return the instance of the current {@link Builder} for the purpose of method chaining
*/
public Builder<K, V> maxTextFields() {
instance.maxTextFields = true;
return this;
}
/**
 * Make the index temporary: it expires after the given period of inactivity, in seconds. By default
 * an index never expires.
 * <p/>
 * The internal idle timer is reset whenever the index is searched or added to. Temporary indexes are
 * lightweight, so thousands can exist without performance impact; consider combining this option with
 * {@link Builder#skipInitialScan()} to avoid a costly initial scan.
 * <p/>
 * Warning: when a temporary index expires, all documents associated with it are dropped along with
 * the index.
 *
 * @param seconds idle time in seconds after which the index expires
 * @return this {@link Builder} for method chaining
 */
public Builder<K, V> temporary(long seconds) {
    this.instance.temporary = OptionalLong.of(seconds);
    return this;
}
/**
 * Do not store term offsets for documents. This saves memory, but exact searches and highlighting
 * become unavailable; it implies {@link Builder#noHighlighting()}.
 *
 * @return this {@link Builder} for method chaining
 */
public Builder<K, V> noOffsets() {
    this.instance.noOffsets = true;
    return this;
}
/**
 * Disable highlighting support, conserving storage space and memory. When set, the corresponding
 * byte offsets for term positions are not stored. NOHL is also implied by NOOFFSETS.
 *
 * @return this {@link Builder} for method chaining
 */
public Builder<K, V> noHighlighting() {
    this.instance.noHighlight = true;
    return this;
}
/**
 * Do not store attribute bits for each term. This saves memory, but filtering by specific
 * attributes is no longer possible.
 *
 * @return this {@link Builder} for method chaining
 */
public Builder<K, V> noFields() {
    this.instance.noFields = true;
    return this;
}
/**
 * Do not store the frequency of each term. This saves memory, but sorting by the frequency of a
 * given term is no longer possible.
 *
 * @return this {@link Builder} for method chaining
 */
public Builder<K, V> noFrequency() {
    this.instance.noFrequency = true;
    return this;
}
/**
 * Do not scan existing content when the index is created. The default is to perform an initial scan.
 *
 * @return this {@link Builder} for method chaining
 */
public Builder<K, V> skipInitialScan() {
    this.instance.skipInitialScan = true;
    return this;
}
/**
 * Index with a custom stopword list, ignored during indexing and at search time. Without this
 * setting the default stopword list applies; passing an empty list disables stopwords entirely.
 *
 * @param stopWords the stop words to use
 * @return this {@link Builder} for method chaining
 * @see <a href="https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/stopwords/">Stop
 *      words</a>
 */
public Builder<K, V> stopWords(List<V> stopWords) {
    this.instance.stopWords = Optional.of(stopWords);
    return this;
}
/**
 * Finish building.
 *
 * @return the configured {@link CreateArgs} instance
 */
public CreateArgs<K, V> build() {
    return this.instance;
}
}
/**
 * @return the target type for the index, if set
 * @see TargetType
 * @see Builder#on(TargetType)
 */
public Optional<TargetType> getOn() {
    return this.on;
}
/**
 * @return the key prefixes for the index (possibly empty)
 * @see Builder#withPrefix(Object)
 * @see Builder#withPrefixes(List)
 */
public List<K> getPrefixes() {
    return this.prefixes;
}
/**
 * @return the filter expression for the index, if set
 * @see Builder#filter(Object)
 */
public Optional<V> getFilter() {
    return this.filter;
}
/**
 * @return the default language for documents in the index, if set
 * @see Builder#defaultLanguage(DocumentLanguage)
 */
public Optional<DocumentLanguage> getDefaultLanguage() {
    return this.defaultLanguage;
}
/**
 * @return the field holding the per-document language setting, if set
 * @see Builder#languageField(Object)
 */
public Optional<K> getLanguageField() {
    return this.languageField;
}
/**
 * @return the default score for documents in the index, if set
 * @see Builder#defaultScore(double)
 */
public OptionalDouble getDefaultScore() {
    return this.defaultScore;
}
/**
 * @return the field holding the per-document score setting, if set
 * @see Builder#scoreField(Object)
 */
public Optional<K> getScoreField() {
    return this.scoreField;
}
/**
 * @return the field holding the per-document payload, if set
 * @see Builder#payloadField(Object)
 */
public Optional<K> getPayloadField() {
    return this.payloadField;
}
/**
 * @return {@code true} if the 32-text-attribute limit is lifted for this index
 * @see Builder#maxTextFields()
 */
public boolean isMaxTextFields() {
    return this.maxTextFields;
}
/**
 * @return the temporary-index expiration time in seconds, if set
 * @see Builder#temporary(long)
 */
public OptionalLong getTemporary() {
    return this.temporary;
}
/**
 * @return {@code true} if term offsets are not stored
 * @see Builder#noOffsets()
 */
public boolean isNoOffsets() {
    return this.noOffsets;
}
/**
 * @return {@code true} if highlighting support is disabled
 * @see Builder#noHighlighting()
 */
public boolean isNoHighlight() {
    return this.noHighlight;
}
/**
 * @return {@code true} if attribute bits are not stored per term
 * @see Builder#noFields()
 */
public boolean isNoFields() {
    return this.noFields;
}
/**
 * @return {@code true} if term frequencies are not stored
 * @see Builder#noFrequency()
 */
public boolean isNoFrequency() {
    return this.noFrequency;
}
/**
 * @return {@code true} if the initial scan of existing keys is skipped
 * @see Builder#skipInitialScan()
 */
public boolean isSkipInitialScan() {
    return this.skipInitialScan;
}
/**
 * @return the custom stopword list for the index, if set
 * @see Builder#stopWords(List)
 */
public Optional<List<V>> getStopWords() {
    return this.stopWords;
}
/**
 * Append all configured FT.CREATE arguments to the given {@link CommandArgs}.
 * <p/>
 * Only options that were explicitly set are emitted; boolean options are emitted as bare keywords.
 *
 * @param args the {@link CommandArgs} to append to
 */
public void build(CommandArgs<K, V> args) {
    on.ifPresent(targetType -> args.add(ON).add(targetType.name()));
    if (!prefixes.isEmpty()) {
        args.add(PREFIX).add(prefixes.size());
        // Prefixes are keys (type K): encode them with the key codec via addKey, consistent with
        // LANGUAGE_FIELD/SCORE_FIELD/PAYLOAD_FIELD below. The previous p.toString() + add(String)
        // produced wrong bytes for non-String key types.
        prefixes.forEach(args::addKey);
    }
    filter.ifPresent(f -> args.add(FILTER).addValue(f));
    defaultLanguage.ifPresent(language -> args.add(LANGUAGE).add(language.toString()));
    languageField.ifPresent(field -> args.add(LANGUAGE_FIELD).addKey(field));
    defaultScore.ifPresent(score -> args.add(SCORE).add(score));
    scoreField.ifPresent(field -> args.add(SCORE_FIELD).addKey(field));
    payloadField.ifPresent(field -> args.add(PAYLOAD_FIELD).addKey(field));
    if (maxTextFields) {
        args.add(MAXTEXTFIELDS);
    }
    temporary.ifPresent(seconds -> args.add(TEMPORARY).add(seconds));
    if (noOffsets) {
        args.add(NOOFFSETS);
    }
    if (noHighlight) {
        args.add(NOHL);
    }
    if (noFields) {
        args.add(NOFIELDS);
    }
    if (noFrequency) {
        args.add(NOFREQS);
    }
    if (skipInitialScan) {
        args.add(SKIPINITIALSCAN);
    }
    stopWords.ifPresent(words -> {
        args.add(STOPWORDS).add(words.size());
        words.forEach(args::addValue);
    });
}
}
| Builder |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/producer/InterceptionProxySubclassNormalScopedTest.java | {
"start": 1116,
"end": 2159
} | class ____ {
@RegisterExtension
public ArcTestContainer container = new ArcTestContainer(MyBinding.class, MyInterceptor.class, MyProducer.class);
@Test
public void test() {
MyNonbean nonbean = Arc.container().instance(MyNonbean.class).get();
assertEquals("intercepted: hello", nonbean.hello());
assertInstanceOf(ClientProxy.class, nonbean);
assertNotNull(ClientProxy.unwrap(nonbean));
assertNotSame(nonbean, ClientProxy.unwrap(nonbean));
MyNonbean unwrapped = ClientProxy.unwrap(nonbean);
assertInstanceOf(InterceptionProxySubclass.class, unwrapped);
assertNotNull(InterceptionProxySubclass.unwrap(unwrapped));
assertNotSame(unwrapped, InterceptionProxySubclass.unwrap(unwrapped));
assertNotSame(nonbean, InterceptionProxySubclass.unwrap(unwrapped));
}
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.TYPE, ElementType.METHOD, ElementType.CONSTRUCTOR })
@InterceptorBinding
@ | InterceptionProxySubclassNormalScopedTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/logging/ConsoleThrowablePatternConverter.java | {
"start": 1532,
"end": 3706
} | class
____ final boolean enabled;
private ConsoleThrowablePatternConverter(String[] options, Configuration config, boolean enabled) {
super("ConsoleThrowablePatternConverter", "throwable", options, config);
this.enabled = enabled;
}
/**
* Gets an instance of the class.
*
* @param config The current Configuration.
* @return instance of class.
*/
public static ConsoleThrowablePatternConverter newInstance(final Configuration config, final String[] options) {
return newInstance(config, options, BootstrapInfo.getConsole() != null);
}
// package private for tests
static ConsoleThrowablePatternConverter newInstance(final Configuration config, final String[] options, boolean enabled) {
return new ConsoleThrowablePatternConverter(options, config, enabled);
}
@Override
public void format(final LogEvent event, final StringBuilder toAppendTo) {
Throwable error = event.getThrown();
if (enabled == false || error == null) {
super.format(event, toAppendTo);
return;
}
if (error instanceof StartupException e) {
error = e.getCause();
toAppendTo.append("\n\nElasticsearch failed to startup normally.\n\n");
}
appendShortStacktrace(error, toAppendTo);
if (error instanceof CreationException) {
toAppendTo.append("There were problems initializing Guice. See log for more details.");
} else {
toAppendTo.append("\n\nSee logs for more details.\n");
}
}
// prints a very truncated stack trace, leaving the rest of the details for the log
private static void appendShortStacktrace(Throwable error, StringBuilder toAppendTo) {
toAppendTo.append(error.getClass().getName());
toAppendTo.append(": ");
toAppendTo.append(error.getMessage());
var stacktrace = error.getStackTrace();
int len = Math.min(stacktrace.length, 5);
for (int i = 0; i < len; ++i) {
toAppendTo.append("\n\tat ");
toAppendTo.append(stacktrace[i].toString());
}
}
}
| private |
java | apache__flink | flink-core/src/test/java/org/apache/flink/util/FileUtilsTest.java | {
"start": 26379,
"end": 26657
} | class ____ extends CheckedThread {
private final File target;
Deleter(File target) {
this.target = target;
}
@Override
public void go() throws Exception {
FileUtils.deleteDirectory(target);
}
}
}
| Deleter |
java | grpc__grpc-java | examples/example-tls/src/main/java/io/grpc/examples/helloworldtls/HelloWorldServerTls.java | {
"start": 1121,
"end": 3768
} | class ____ {
private static final Logger logger = Logger.getLogger(HelloWorldServerTls.class.getName());
private Server server;
private final int port;
private final ServerCredentials creds;
public HelloWorldServerTls(int port, ServerCredentials creds) {
this.port = port;
this.creds = creds;
}
private void start() throws IOException {
server = Grpc.newServerBuilderForPort(port, creds)
.addService(new GreeterImpl())
.build()
.start();
logger.info("Server started, listening on " + port);
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
// Use stderr here since the logger may have been reset by its JVM shutdown hook.
System.err.println("*** shutting down gRPC server since JVM is shutting down");
HelloWorldServerTls.this.stop();
System.err.println("*** server shut down");
}
});
}
private void stop() {
if (server != null) {
server.shutdown();
}
}
/**
* Await termination on the main thread since the grpc library uses daemon threads.
*/
private void blockUntilShutdown() throws InterruptedException {
if (server != null) {
server.awaitTermination();
}
}
/**
* Main launches the server from the command line.
*/
public static void main(String[] args) throws IOException, InterruptedException {
if (args.length < 3 || args.length > 4) {
System.out.println(
"USAGE: HelloWorldServerTls port certChainFilePath privateKeyFilePath " +
"[trustCertCollectionFilePath]\n Note: You only need to supply trustCertCollectionFilePath if you want " +
"to enable Mutual TLS.");
System.exit(0);
}
// If only providing a private key, you can use TlsServerCredentials.create() instead of
// interacting with the Builder.
TlsServerCredentials.Builder tlsBuilder = TlsServerCredentials.newBuilder()
.keyManager(new File(args[1]), new File(args[2]));
if (args.length == 4) {
tlsBuilder.trustManager(new File(args[3]));
tlsBuilder.clientAuth(TlsServerCredentials.ClientAuth.REQUIRE);
}
final HelloWorldServerTls server = new HelloWorldServerTls(
Integer.parseInt(args[0]), tlsBuilder.build());
server.start();
server.blockUntilShutdown();
}
static | HelloWorldServerTls |
java | alibaba__nacos | config/src/test/java/com/alibaba/nacos/config/server/controller/v2/ConfigControllerV2Test.java | {
"start": 3321,
"end": 16359
} | class ____ {
private static final String TEST_DATA_ID = "test";
private static final String TEST_GROUP = "test";
private static final String TEST_NAMESPACE_ID = "";
private static final String TEST_NAMESPACE_ID_PUBLIC = "public";
private static final String TEST_TAG = "";
private static final String TEST_CONTENT = "test config";
private static final String TEST_ENCRYPTED_DATA_KEY = "test_encrypted_data_key";
@InjectMocks
private AuthFilter authFilter;
@Mock
private NacosAuthConfig authConfig;
@Mock
private ControllerMethodsCache controllerMethodsCache;
private ConfigControllerV2 configControllerV2;
private MockMvc mockmvc;
@Mock
private ConfigServletInner inner;
@Mock
private ConfigOperationService configOperationService;
@Mock
private ServletContext servletContext;
@Mock
private ConfigInfoPersistService configInfoPersistService;
private ConfigDetailService configDetailService;
@BeforeEach
void setUp() {
EnvUtil.setEnvironment(new StandardEnvironment());
configDetailService = new ConfigDetailService(configInfoPersistService);
configControllerV2 = new ConfigControllerV2(inner, configOperationService, configDetailService);
mockmvc = MockMvcBuilders.standaloneSetup(configControllerV2).addFilter(authFilter).build();
when(authConfig.isAuthEnabled()).thenReturn(false);
}
@Test
void testGetConfig() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
Result<String> stringResult = Result.success(TEST_CONTENT);
doAnswer(x -> {
x.getArgument(1, HttpServletResponse.class).setStatus(200);
x.getArgument(1, HttpServletResponse.class)
.setContentType(com.alibaba.nacos.common.http.param.MediaType.APPLICATION_JSON);
x.getArgument(1, HttpServletResponse.class).getWriter().print(JacksonUtils.toJson(stringResult));
return null;
}).when(inner).doGetConfig(any(HttpServletRequest.class), any(HttpServletResponse.class), eq(TEST_DATA_ID),
eq(TEST_GROUP), eq(TEST_NAMESPACE_ID_PUBLIC), eq(TEST_TAG), eq(null), anyString(),
eq(ApiVersionEnum.V2));
configControllerV2.getConfig(request, response, TEST_DATA_ID, TEST_GROUP, TEST_NAMESPACE_ID, TEST_TAG);
verify(inner).doGetConfig(eq(request), eq(response), eq(TEST_DATA_ID), eq(TEST_GROUP),
eq(TEST_NAMESPACE_ID_PUBLIC), eq(TEST_TAG), eq(null), anyString(), eq(ApiVersionEnum.V2));
JsonNode resNode = JacksonUtils.toObj(response.getContentAsString());
Integer errCode = JacksonUtils.toObj(resNode.get("code").toString(), Integer.class);
String actContent = JacksonUtils.toObj(resNode.get("data").toString(), String.class);
assertEquals(200, response.getStatus());
assertEquals(ErrorCode.SUCCESS.getCode(), errCode);
assertEquals(TEST_CONTENT, actContent);
}
@Test
void testPublishConfig() throws Exception {
ConfigForm configForm = new ConfigForm();
configForm.setDataId(TEST_DATA_ID);
configForm.setGroup(TEST_GROUP);
configForm.setNamespaceId(TEST_NAMESPACE_ID);
configForm.setContent(TEST_CONTENT);
MockHttpServletRequest request = new MockHttpServletRequest();
when(configOperationService.publishConfig(any(ConfigForm.class), any(ConfigRequestInfo.class),
anyString())).thenReturn(true);
Result<Boolean> booleanResult = configControllerV2.publishConfig(configForm, request);
verify(configOperationService).publishConfig(any(ConfigForm.class), any(ConfigRequestInfo.class), anyString());
assertEquals(ErrorCode.SUCCESS.getCode(), booleanResult.getCode());
assertTrue(booleanResult.getData());
}
@Test
void testPublishConfigWithEncryptedDataKey() throws Exception {
ConfigForm configForm = new ConfigForm();
configForm.setDataId(TEST_DATA_ID);
configForm.setGroup(TEST_GROUP);
configForm.setNamespaceId(TEST_NAMESPACE_ID);
configForm.setContent(TEST_CONTENT);
configForm.setEncryptedDataKey(TEST_ENCRYPTED_DATA_KEY);
MockHttpServletRequest request = new MockHttpServletRequest();
when(configOperationService.publishConfig(any(ConfigForm.class), any(ConfigRequestInfo.class),
eq(TEST_ENCRYPTED_DATA_KEY))).thenReturn(true);
Result<Boolean> booleanResult = configControllerV2.publishConfig(configForm, request);
verify(configOperationService).publishConfig(any(ConfigForm.class), any(ConfigRequestInfo.class), anyString());
assertEquals(ErrorCode.SUCCESS.getCode(), booleanResult.getCode());
assertTrue(booleanResult.getData());
}
@Test
void testPublishConfigWhenNameSpaceIsPublic() throws Exception {
ConfigForm configForm = new ConfigForm();
configForm.setDataId(TEST_DATA_ID);
configForm.setGroup(TEST_GROUP);
configForm.setNamespaceId(TEST_NAMESPACE_ID_PUBLIC);
configForm.setContent(TEST_CONTENT);
MockHttpServletRequest request = new MockHttpServletRequest();
Answer<Boolean> answer = invocationOnMock -> {
if (invocationOnMock.getArgument(0, ConfigForm.class).getNamespaceId().equals(TEST_NAMESPACE_ID_PUBLIC)) {
return true;
}
return false;
};
when(configOperationService.publishConfig(any(ConfigForm.class), any(ConfigRequestInfo.class),
anyString())).thenAnswer(answer);
Result<Boolean> booleanResult = configControllerV2.publishConfig(configForm, request);
verify(configOperationService).publishConfig(any(ConfigForm.class), any(ConfigRequestInfo.class), anyString());
assertEquals(ErrorCode.SUCCESS.getCode(), booleanResult.getCode());
assertTrue(booleanResult.getData());
}
@Test
void testDeleteConfigWhenNameSpaceIsPublic() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest();
when(configOperationService.deleteConfig(eq(TEST_DATA_ID), eq(TEST_GROUP), eq(TEST_NAMESPACE_ID_PUBLIC),
eq(TEST_TAG), any(), any(), any())).thenReturn(true);
Result<Boolean> booleanResult = configControllerV2.deleteConfig(request, TEST_DATA_ID, TEST_GROUP,
TEST_NAMESPACE_ID, TEST_TAG);
verify(configOperationService).deleteConfig(eq(TEST_DATA_ID), eq(TEST_GROUP), eq(TEST_NAMESPACE_ID_PUBLIC),
eq(TEST_TAG), any(), any(), eq("http"));
assertEquals(ErrorCode.SUCCESS.getCode(), booleanResult.getCode());
assertTrue(booleanResult.getData());
}
@Test
void testDeleteConfig() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest();
when(configOperationService.deleteConfig(eq(TEST_DATA_ID), eq(TEST_GROUP), eq(TEST_NAMESPACE_ID_PUBLIC),
eq(TEST_TAG), any(), any(), eq("http"))).thenReturn(true);
Result<Boolean> booleanResult = configControllerV2.deleteConfig(request, TEST_DATA_ID, TEST_GROUP,
TEST_NAMESPACE_ID, TEST_TAG);
verify(configOperationService).deleteConfig(eq(TEST_DATA_ID), eq(TEST_GROUP), eq(TEST_NAMESPACE_ID_PUBLIC),
eq(TEST_TAG), any(), any(), eq("http"));
assertEquals(ErrorCode.SUCCESS.getCode(), booleanResult.getCode());
assertTrue(booleanResult.getData());
}
@Test
void testGetConfigByDetail() throws Exception {
List<ConfigInfo> configInfoList = new ArrayList<>();
ConfigInfo configInfo = new ConfigInfo("test", "test", "test");
configInfoList.add(configInfo);
Page<ConfigInfo> page = new Page<>();
page.setTotalCount(15);
page.setPageNumber(1);
page.setPagesAvailable(2);
page.setPageItems(configInfoList);
Map<String, Object> configAdvanceInfo = new HashMap<>(8);
configAdvanceInfo.put("content", "server.port");
when(configInfoPersistService.findConfigInfo4Page(1, 10, "test", "test", "public",
configAdvanceInfo)).thenReturn(page);
MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(
Constants.CONFIG_CONTROLLER_V2_PATH + "/searchDetail").param("search", "accurate")
.param("dataId", "test").param("group", "test").param("appName", "").param("tenant", "")
.param("config_tags", "").param("pageNo", "1").param("pageSize", "10")
.param("config_detail", "server.port");
MockHttpServletResponse response = mockmvc.perform(builder).andReturn().getResponse();
String actualValue = response.getContentAsString();
JsonNode pageItemsNode = JacksonUtils.toObj(actualValue).get("pageItems");
List resultList = JacksonUtils.toObj(pageItemsNode.toString(), List.class);
ConfigInfo resConfigInfo = JacksonUtils.toObj(pageItemsNode.get(0).toString(), ConfigInfo.class);
assertEquals(configInfoList.size(), resultList.size());
assertEquals(configInfo.getDataId(), resConfigInfo.getDataId());
assertEquals(configInfo.getGroup(), resConfigInfo.getGroup());
assertEquals(configInfo.getContent(), resConfigInfo.getContent());
}
@Test
void testGetConfigFuzzyByDetail() throws Exception {
List<ConfigInfo> configInfoList = new ArrayList<>();
ConfigInfo configInfo = new ConfigInfo("test", "test", "test");
configInfoList.add(configInfo);
Page<ConfigInfo> page = new Page<>();
page.setTotalCount(15);
page.setPageNumber(1);
page.setPagesAvailable(2);
page.setPageItems(configInfoList);
Map<String, Object> configAdvanceInfo = new HashMap<>(8);
configAdvanceInfo.put("content", "server.port");
when(configInfoPersistService.findConfigInfoLike4Page(1, 10, "test", "test", "public",
configAdvanceInfo)).thenReturn(page);
MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(
Constants.CONFIG_CONTROLLER_V2_PATH + "/searchDetail").param("search", "blur").param("dataId", "test")
.param("group", "test").param("appName", "").param("tenant", "").param("config_tags", "")
.param("pageNo", "1").param("pageSize", "10").param("config_detail", "server.port");
MockHttpServletResponse response = mockmvc.perform(builder).andReturn().getResponse();
String actualValue = response.getContentAsString();
JsonNode pageItemsNode = JacksonUtils.toObj(actualValue).get("pageItems");
List resultList = JacksonUtils.toObj(pageItemsNode.toString(), List.class);
ConfigInfo resConfigInfo = JacksonUtils.toObj(pageItemsNode.get(0).toString(), ConfigInfo.class);
assertEquals(configInfoList.size(), resultList.size());
assertEquals(configInfo.getDataId(), resConfigInfo.getDataId());
assertEquals(configInfo.getGroup(), resConfigInfo.getGroup());
assertEquals(configInfo.getContent(), resConfigInfo.getContent());
}
@Test
void testGetConfigAuthFilter() throws Exception {
when(authConfig.isAuthEnabled()).thenReturn(true);
Method method = Arrays.stream(ConfigControllerV2.class.getMethods())
.filter(m -> m.getName().equals("searchConfigByDetails")).findFirst().get();
when(controllerMethodsCache.getMethod(any(HttpServletRequest.class))).thenReturn(method);
MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(
Constants.CONFIG_CONTROLLER_V2_PATH + "/searchDetail").param("search", "accurate")
.param("dataId", "test").param("group", "test").param("appName", "").param("tenant", "")
.param("config_tags", "").param("pageNo", "1").param("pageSize", "10")
.param("config_detail", "server.port");
MockHttpServletResponse response = mockmvc.perform(builder).andReturn().getResponse();
assertEquals(HttpServletResponse.SC_FORBIDDEN, response.getStatus());
assertEquals(response.getErrorMessage(),
"Invalid server identity key or value, Please make sure set `nacos.core.auth.server.identity.key`"
+ " and `nacos.core.auth.server.identity.value`, or open `nacos.core.auth.enable.userAgentAuthWhite`");
}
}
| ConfigControllerV2Test |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java | {
"start": 2325,
"end": 20744
} | class ____ implements Queue, Schedulable {
private static final Logger LOG = LoggerFactory.getLogger(
FSQueue.class.getName());
private Resource fairShare = Resources.createResource(0, 0);
private Resource steadyFairShare = Resources.createResource(0, 0);
private Resource reservedResource = Resources.createResource(0, 0);
private final Resource resourceUsage = Resource.newInstance(0, 0);
private final String name;
protected final FairScheduler scheduler;
private final YarnAuthorizationProvider authorizer;
private final PrivilegedEntity queueEntity;
private final FSQueueMetrics metrics;
protected final FSParentQueue parent;
protected final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
protected SchedulingPolicy policy = SchedulingPolicy.DEFAULT_POLICY;
protected float weights;
protected Resource minShare;
private ConfigurableResource maxShare;
protected int maxRunningApps;
private ConfigurableResource maxChildQueueResource;
// maxAMShare is a value between 0 and 1.
protected float maxAMShare;
private long fairSharePreemptionTimeout = Long.MAX_VALUE;
private long minSharePreemptionTimeout = Long.MAX_VALUE;
private float fairSharePreemptionThreshold = 0.5f;
private boolean preemptable = true;
private boolean isDynamic = true;
protected Resource maxContainerAllocation;
public FSQueue(String name, FairScheduler scheduler, FSParentQueue parent) {
this.name = name;
this.scheduler = scheduler;
this.authorizer =
YarnAuthorizationProvider.getInstance(scheduler.getConf());
this.queueEntity = new PrivilegedEntity(EntityType.QUEUE, name);
this.metrics = FSQueueMetrics.forQueue(getName(), parent, true, scheduler.getConf());
this.parent = parent;
setPolicy(scheduler.getAllocationConfiguration().getSchedulingPolicy(name));
reinit(false);
}
/**
* Initialize a queue by setting its queue-specific properties and its
* metrics. This method is invoked when creating a new queue or reloading
* the allocation file.
* This method does not set policies for queues when reloading the allocation
* file since we need to either set all new policies or nothing, which is
* handled by method {@link #verifyAndSetPolicyFromConf}.
*
* @param recursive whether child queues should be reinitialized recursively
*/
public final void reinit(boolean recursive) {
AllocationConfiguration allocConf = scheduler.getAllocationConfiguration();
allocConf.initFSQueue(this);
updatePreemptionVariables();
if (recursive) {
for (FSQueue child : getChildQueues()) {
child.reinit(recursive);
}
}
}
public String getName() {
return name;
}
@Override
public String getQueueName() {
return name;
}
public SchedulingPolicy getPolicy() {
return policy;
}
public FSParentQueue getParent() {
return parent;
}
public void setPolicy(SchedulingPolicy policy) {
policy.initialize(scheduler.getContext());
this.policy = policy;
}
public void setWeights(float weights) {
this.weights = weights;
}
@Override
public float getWeight() {
return weights;
}
public void setMinShare(Resource minShare){
this.minShare = minShare;
}
@Override
public Resource getMinShare() {
return minShare;
}
public void setMaxShare(ConfigurableResource maxShare){
this.maxShare = maxShare;
}
public void setMaxContainerAllocation(Resource maxContainerAllocation){
this.maxContainerAllocation = maxContainerAllocation;
}
public abstract Resource getMaximumContainerAllocation();
@Override
public Resource getMaxShare() {
Resource maxResource = maxShare.getResource(scheduler.getClusterResource());
// Max resource should be greater than or equal to min resource
Resource result = Resources.componentwiseMax(maxResource, minShare);
if (!Resources.equals(maxResource, result)) {
LOG.warn(String.format("Queue %s has max resources %s less than "
+ "min resources %s", getName(), maxResource, minShare));
}
return result;
}
public ConfigurableResource getRawMaxShare() {
return maxShare;
}
public Resource getReservedResource() {
reservedResource.setMemorySize(metrics.getReservedMB());
reservedResource.setVirtualCores(metrics.getReservedVirtualCores());
return reservedResource;
}
public void setMaxChildQueueResource(ConfigurableResource maxChildShare){
this.maxChildQueueResource = maxChildShare;
}
public ConfigurableResource getMaxChildQueueResource() {
return maxChildQueueResource;
}
public void setMaxRunningApps(int maxRunningApps){
this.maxRunningApps = maxRunningApps;
}
public int getMaxRunningApps() {
return maxRunningApps;
}
@VisibleForTesting
public float getMaxAMShare() {
return maxAMShare;
}
public void setMaxAMShare(float maxAMShare){
this.maxAMShare = maxAMShare;
}
@Override
public long getStartTime() {
return 0;
}
@Override
public Priority getPriority() {
Priority p = recordFactory.newRecordInstance(Priority.class);
p.setPriority(1);
return p;
}
@Override
public QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive) {
QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
queueInfo.setSchedulerType("FairScheduler");
queueInfo.setQueueName(getQueueName());
if (scheduler.getClusterResource().getMemorySize() == 0) {
queueInfo.setCapacity(0.0f);
} else {
queueInfo.setCapacity((float) getFairShare().getMemorySize() /
scheduler.getClusterResource().getMemorySize());
}
if (getFairShare().getMemorySize() == 0) {
queueInfo.setCurrentCapacity(0.0f);
} else {
queueInfo.setCurrentCapacity((float) getResourceUsage().getMemorySize() /
getFairShare().getMemorySize());
}
// set Weight
queueInfo.setWeight(getWeight());
// set MinShareResource
Resource minShareResource = getMinShare();
queueInfo.setMinResourceVCore(minShareResource.getVirtualCores());
queueInfo.setMinResourceMemory(minShareResource.getMemorySize());
// set MaxShareResource
Resource maxShareResource =
Resources.componentwiseMin(getMaxShare(), scheduler.getClusterResource());
queueInfo.setMaxResourceVCore(maxShareResource.getVirtualCores());
queueInfo.setMaxResourceMemory(maxShareResource.getMemorySize());
// set ReservedResource
Resource newReservedResource = getReservedResource();
queueInfo.setReservedResourceVCore(newReservedResource.getVirtualCores());
queueInfo.setReservedResourceMemory(newReservedResource.getMemorySize());
// set SteadyFairShare
Resource newSteadyFairShare = getSteadyFairShare();
queueInfo.setSteadyFairShareVCore(newSteadyFairShare.getVirtualCores());
queueInfo.setSteadyFairShareMemory(newSteadyFairShare.getMemorySize());
// set MaxRunningApp
queueInfo.setMaxRunningApp(getMaxRunningApps());
// set Preemption
queueInfo.setPreemptionDisabled(isPreemptable());
ArrayList<QueueInfo> childQueueInfos = new ArrayList<>();
if (includeChildQueues) {
Collection<FSQueue> childQueues = getChildQueues();
for (FSQueue child : childQueues) {
childQueueInfos.add(child.getQueueInfo(recursive, recursive));
}
}
queueInfo.setChildQueues(childQueueInfos);
queueInfo.setQueueState(QueueState.RUNNING);
queueInfo.setQueueStatistics(getQueueStatistics());
return queueInfo;
}
public QueueStatistics getQueueStatistics() {
QueueStatistics stats =
recordFactory.newRecordInstance(QueueStatistics.class);
stats.setNumAppsSubmitted(getMetrics().getAppsSubmitted());
stats.setNumAppsRunning(getMetrics().getAppsRunning());
stats.setNumAppsPending(getMetrics().getAppsPending());
stats.setNumAppsCompleted(getMetrics().getAppsCompleted());
stats.setNumAppsKilled(getMetrics().getAppsKilled());
stats.setNumAppsFailed(getMetrics().getAppsFailed());
stats.setNumActiveUsers(getMetrics().getActiveUsers());
stats.setAvailableMemoryMB(getMetrics().getAvailableMB());
stats.setAllocatedMemoryMB(getMetrics().getAllocatedMB());
stats.setPendingMemoryMB(getMetrics().getPendingMB());
stats.setReservedMemoryMB(getMetrics().getReservedMB());
stats.setAvailableVCores(getMetrics().getAvailableVirtualCores());
stats.setAllocatedVCores(getMetrics().getAllocatedVirtualCores());
stats.setPendingVCores(getMetrics().getPendingVirtualCores());
stats.setReservedVCores(getMetrics().getReservedVirtualCores());
stats.setAllocatedContainers(getMetrics().getAllocatedContainers());
stats.setPendingContainers(getMetrics().getPendingContainers());
stats.setReservedContainers(getMetrics().getReservedContainers());
return stats;
}
@Override
public FSQueueMetrics getMetrics() {
return metrics;
}
/** Get the fair share assigned to this Schedulable. */
public Resource getFairShare() {
return fairShare;
}
@Override
public void setFairShare(Resource fairShare) {
this.fairShare = fairShare;
metrics.setFairShare(fairShare);
LOG.debug("The updated fairShare for {} is {}", getName(), fairShare);
}
/**
* Get the steady fair share assigned to this Schedulable.
* @return the steady fair share assigned to this Schedulable.
*/
public Resource getSteadyFairShare() {
return steadyFairShare;
}
void setSteadyFairShare(Resource steadyFairShare) {
this.steadyFairShare = steadyFairShare;
metrics.setSteadyFairShare(steadyFairShare);
}
public boolean hasAccess(QueueACL acl, UserGroupInformation user) {
return authorizer.checkPermission(
new AccessRequest(queueEntity, user,
SchedulerUtils.toAccessType(acl), null, null,
Server.getRemoteAddress(), null));
}
long getFairSharePreemptionTimeout() {
return fairSharePreemptionTimeout;
}
void setFairSharePreemptionTimeout(long fairSharePreemptionTimeout) {
this.fairSharePreemptionTimeout = fairSharePreemptionTimeout;
}
long getMinSharePreemptionTimeout() {
return minSharePreemptionTimeout;
}
void setMinSharePreemptionTimeout(long minSharePreemptionTimeout) {
this.minSharePreemptionTimeout = minSharePreemptionTimeout;
}
float getFairSharePreemptionThreshold() {
return fairSharePreemptionThreshold;
}
void setFairSharePreemptionThreshold(float fairSharePreemptionThreshold) {
this.fairSharePreemptionThreshold = fairSharePreemptionThreshold;
}
@Override
public boolean isPreemptable() {
return preemptable;
}
/**
* Recomputes the shares for all child queues and applications based on this
* queue's current share.
*
* To be called holding the scheduler writelock.
*/
abstract void updateInternal();
/**
* Set the queue's fairshare and update the demand/fairshare of child
* queues/applications.
*
* To be called holding the scheduler writelock.
*
* @param fairShare queue's fairshare.
*/
public void update(Resource fairShare) {
setFairShare(fairShare);
updateInternal();
}
/**
* Update the min/fair share preemption timeouts, threshold and preemption
* disabled flag for this queue.
*/
private void updatePreemptionVariables() {
// For min share timeout
minSharePreemptionTimeout = scheduler.getAllocationConfiguration()
.getMinSharePreemptionTimeout(getName());
if (minSharePreemptionTimeout == -1 && parent != null) {
minSharePreemptionTimeout = parent.getMinSharePreemptionTimeout();
}
// For fair share timeout
fairSharePreemptionTimeout = scheduler.getAllocationConfiguration()
.getFairSharePreemptionTimeout(getName());
if (fairSharePreemptionTimeout == -1 && parent != null) {
fairSharePreemptionTimeout = parent.getFairSharePreemptionTimeout();
}
// For fair share preemption threshold
fairSharePreemptionThreshold = scheduler.getAllocationConfiguration()
.getFairSharePreemptionThreshold(getName());
if (fairSharePreemptionThreshold < 0 && parent != null) {
fairSharePreemptionThreshold = parent.getFairSharePreemptionThreshold();
}
// For option whether allow preemption from this queue.
// If the parent is non-preemptable, this queue is non-preemptable as well,
// otherwise get the value from the allocation file.
if (parent != null && !parent.isPreemptable()) {
preemptable = false;
} else {
preemptable = scheduler.getAllocationConfiguration()
.isPreemptable(getName());
}
}
/**
* Gets the children of this queue, if any.
*
* @return the children of this queue.
*/
public abstract List<FSQueue> getChildQueues();
/**
* Adds all applications in the queue and its subqueues to the given collection.
* @param apps the collection to add the applications to
*/
public abstract void collectSchedulerApplications(
Collection<ApplicationAttemptId> apps);
/**
* Return the number of apps for which containers can be allocated.
* Includes apps in subqueues.
*
* @return the number of apps.
*/
public abstract int getNumRunnableApps();
/**
* Helper method to check if the queue should attempt assigning resources
*
* @return true if check passes (can assign) or false otherwise
*/
boolean assignContainerPreCheck(FSSchedulerNode node) {
if (node.getReservedContainer() != null) {
LOG.debug("Assigning container failed on node '{}' because it has"
+ " reserved containers.", node.getNodeName());
return false;
} else if (!Resources.fitsIn(getResourceUsage(), getMaxShare())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Assigning container failed on node '" + node.getNodeName()
+ " because queue resource usage is larger than MaxShare: "
+ dumpState());
}
return false;
} else {
return true;
}
}
/**
* Returns true if queue has at least one app running.
*
* @return true, if queue has at least one app running; otherwise, false;
*/
public boolean isActive() {
return getNumRunnableApps() > 0;
}
/** Convenient toString implementation for debugging. */
@Override
public String toString() {
return String.format("[%s, demand=%s, running=%s, share=%s, w=%s]",
getName(), getDemand(), getResourceUsage(), fairShare, getWeight());
}
@Override
public Set<String> getAccessibleNodeLabels() {
// TODO, add implementation for FS
return null;
}
@Override
public String getDefaultNodeLabelExpression() {
// TODO, add implementation for FS
return null;
}
@Override
public void incPendingResource(String nodeLabel, Resource resourceToInc) {
}
@Override
public void decPendingResource(String nodeLabel, Resource resourceToDec) {
}
@Override
public void incReservedResource(String nodeLabel, Resource resourceToInc) {
}
@Override
public void decReservedResource(String nodeLabel, Resource resourceToDec) {
}
@Override
public Resource getResourceUsage() {
return resourceUsage;
}
/**
* Increase resource usage for this queue and all parent queues.
*
* @param res the resource to increase
*/
public void incUsedResource(Resource res) {
synchronized (resourceUsage) {
Resources.addTo(resourceUsage, res);
if (parent != null) {
parent.incUsedResource(res);
}
}
}
/**
* Decrease resource usage for this queue and all parent queues.
*
* @param res the resource to decrease
*/
protected void decUsedResource(Resource res) {
synchronized (resourceUsage) {
Resources.subtractFrom(resourceUsage, res);
if (parent != null) {
parent.decUsedResource(res);
}
}
}
@Override
public Priority getDefaultApplicationPriority() {
// TODO add implementation for FSParentQueue
return null;
}
boolean fitsInMaxShare(Resource additionalResource) {
Resource usagePlusAddition =
Resources.add(getResourceUsage(), additionalResource);
if (!Resources.fitsIn(usagePlusAddition, getMaxShare())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Resource usage plus resource request: " + usagePlusAddition
+ " exceeds maximum resource allowed:" + getMaxShare()
+ " in queue " + getName());
}
return false;
}
FSQueue parentQueue = getParent();
if (parentQueue != null) {
return parentQueue.fitsInMaxShare(additionalResource);
}
return true;
}
/**
* Recursively check policies for queues in pre-order. Get queue policies
* from the allocation file instead of properties of {@link FSQueue} objects.
* Set the policy for current queue if there is no policy violation for its
* children. This method is invoked while reloading the allocation file.
*
* @param queueConf allocation configuration
* @return true if no policy violation and successfully set polices
* for queues; false otherwise
*/
public boolean verifyAndSetPolicyFromConf(AllocationConfiguration queueConf) {
SchedulingPolicy queuePolicy = queueConf.getSchedulingPolicy(getName());
for (FSQueue child : getChildQueues()) {
if (!queuePolicy.isChildPolicyAllowed(
queueConf.getSchedulingPolicy(child.getName()))) {
return false;
}
boolean success = child.verifyAndSetPolicyFromConf(queueConf);
if (!success) {
return false;
}
}
// Set the policy if no policy violation for all children
setPolicy(queuePolicy);
return true;
}
/**
* Recursively dump states of all queues.
*
* @return a string which holds all queue states
*/
public String dumpState() {
StringBuilder sb = new StringBuilder();
dumpStateInternal(sb);
return sb.toString();
}
/**
* Recursively dump states of all queues.
*
* @param sb the {code StringBuilder} which holds queue states
*/
protected abstract void dumpStateInternal(StringBuilder sb);
public boolean isDynamic() {
return isDynamic;
}
public void setDynamic(boolean dynamic) {
this.isDynamic = dynamic;
}
public abstract boolean isEmpty();
}
| FSQueue |
java | square__retrofit | retrofit/java-test/src/test/java/retrofit2/RequestFactoryTest.java | {
"start": 10326,
"end": 10834
} | class ____ {
@FormUrlEncoded //
@GET("/") //
Call<ResponseBody> method() {
return null;
}
}
try {
buildRequest(Example.class);
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
.isEqualTo(
"FormUrlEncoded can only be specified on HTTP methods with request body (e.g., @POST).\n for method Example.method");
}
}
@Test
public void formEncodingFailsWithNoParts() {
| Example |
java | apache__kafka | metadata/src/test/java/org/apache/kafka/image/writer/ImageWriterOptionsTest.java | {
"start": 2099,
"end": 5288
} | class ____ {
@Test
public void testDefaultLossHandler() {
ImageWriterOptions options = new ImageWriterOptions.Builder(MetadataVersion.latestProduction()).build();
assertEquals("stuff", assertThrows(UnwritableMetadataException.class,
() -> options.handleLoss("stuff")).loss());
}
@Test
public void testHandleLoss() {
String expectedMessage = "stuff";
for (int i = MetadataVersion.MINIMUM_VERSION.ordinal();
i < MetadataVersion.VERSIONS.length;
i++) {
MetadataVersion version = MetadataVersion.VERSIONS[i];
String formattedMessage = String.format("Metadata has been lost because the following could not be represented in metadata.version %s: %s", version, expectedMessage);
Consumer<UnwritableMetadataException> customLossHandler = e -> assertEquals(formattedMessage, e.getMessage());
ImageWriterOptions options = new ImageWriterOptions.Builder(version)
.setLossHandler(customLossHandler)
.build();
options.handleLoss(expectedMessage);
}
}
@Test
public void testSetEligibleLeaderReplicasEnabled() {
MetadataVersion version = MetadataVersion.MINIMUM_VERSION;
ImageWriterOptions options = new ImageWriterOptions.Builder(version).
setEligibleLeaderReplicasEnabled(true).build();
assertTrue(options.isEligibleLeaderReplicasEnabled());
options = new ImageWriterOptions.Builder(version).build();
assertFalse(options.isEligibleLeaderReplicasEnabled());
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testConstructionWithImage(boolean isElrEnabled) {
FeaturesDelta featuresDelta = new FeaturesDelta(FeaturesImage.EMPTY);
featuresDelta.replay(new FeatureLevelRecord().
setName(EligibleLeaderReplicasVersion.FEATURE_NAME).
setFeatureLevel(isElrEnabled ?
EligibleLeaderReplicasVersion.ELRV_1.featureLevel() : EligibleLeaderReplicasVersion.ELRV_0.featureLevel()
)
);
featuresDelta.replay(new FeatureLevelRecord().
setName(MetadataVersion.FEATURE_NAME).
setFeatureLevel(MetadataVersion.IBP_4_0_IV1.featureLevel())
);
MetadataImage metadataImage = new MetadataImage(
new MetadataProvenance(100, 4, 2000, true),
featuresDelta.apply(),
ClusterImageTest.IMAGE1,
TopicsImageTest.IMAGE1,
ConfigurationsImageTest.IMAGE1,
ClientQuotasImageTest.IMAGE1,
ProducerIdsImageTest.IMAGE1,
AclsImageTest.IMAGE1,
ScramImageTest.IMAGE1,
DelegationTokenImageTest.IMAGE1
);
ImageWriterOptions options = new ImageWriterOptions.Builder(metadataImage).build();
assertEquals(MetadataVersion.IBP_4_0_IV1, options.metadataVersion());
if (isElrEnabled) {
assertTrue(options.isEligibleLeaderReplicasEnabled());
} else {
assertFalse(options.isEligibleLeaderReplicasEnabled());
}
}
}
| ImageWriterOptionsTest |
java | quarkusio__quarkus | integration-tests/hibernate-orm-panache/src/main/java/io/quarkus/it/panache/defaultpu/BookDao.java | {
"start": 186,
"end": 252
} | class ____ implements PanacheRepositoryBase<Book, Integer> {
}
| BookDao |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsCollectionTests.java | {
"start": 1329,
"end": 4621
} | class ____ extends ESTestCase {
private NamedWriteableRegistry namedWriteableRegistry;
@Before
public void registerNamedObjects() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
List<NamedWriteableRegistry.Entry> namedWriteables = searchModule.getNamedWriteables();
namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables);
}
public void testDataStreamName() {
AnalyticsCollection collection = randomAnalyticsCollection();
String expectedDataStreamName = EVENT_DATA_STREAM_INDEX_PREFIX + collection.getName();
assertEquals(expectedDataStreamName, collection.getEventDataStream());
}
public final void testRandomSerialization() throws IOException {
for (int runs = 0; runs < 10; runs++) {
AnalyticsCollection collection = randomAnalyticsCollection();
assertTransportSerialization(collection, TransportVersion.current());
assertXContent(collection, randomBoolean());
}
}
public void testToXContent() throws IOException {
String content = XContentHelper.stripWhitespace("""
{ }
""");
AnalyticsCollection collection = AnalyticsCollection.fromXContentBytes("my_collection", new BytesArray(content), XContentType.JSON);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(collection, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
AnalyticsCollection parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = AnalyticsCollection.fromXContent(collection.getName(), parser);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
}
private AnalyticsCollection assertXContent(AnalyticsCollection collection, boolean humanReadable) throws IOException {
BytesReference originalBytes = toShuffledXContent(collection, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
AnalyticsCollection parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = AnalyticsCollection.fromXContent(collection.getName(), parser);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
return parsed;
}
private AnalyticsCollection assertTransportSerialization(AnalyticsCollection testInstance, TransportVersion version)
throws IOException {
AnalyticsCollection deserializedInstance = copyInstance(testInstance, version);
assertNotSame(testInstance, deserializedInstance);
assertThat(testInstance, equalTo(deserializedInstance));
return deserializedInstance;
}
private AnalyticsCollection copyInstance(AnalyticsCollection instance, TransportVersion version) throws IOException {
return copyWriteable(instance, namedWriteableRegistry, AnalyticsCollection::new, version);
}
private static AnalyticsCollection randomAnalyticsCollection() {
return new AnalyticsCollection(randomIdentifier());
}
}
| AnalyticsCollectionTests |
java | alibaba__nacos | api/src/test/java/com/alibaba/nacos/api/config/ConfigChangeEventTest.java | {
"start": 862,
"end": 1426
} | class ____ {
@Test
void testConstructor() {
Map<String, ConfigChangeItem> mockData = new HashMap<>();
mockData.put("test", new ConfigChangeItem("testKey", null, "testValue"));
ConfigChangeEvent event = new ConfigChangeEvent(mockData);
assertEquals(1, event.getChangeItems().size());
assertEquals("testKey", event.getChangeItem("test").getKey());
assertNull(event.getChangeItem("test").getOldValue());
assertEquals("testValue", event.getChangeItem("test").getNewValue());
}
} | ConfigChangeEventTest |
java | apache__flink | flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/JsonParseException.java | {
"start": 915,
"end": 1204
} | class ____ extends RuntimeException {
private static final long serialVersionUID = 1L;
public JsonParseException(String message) {
super(message);
}
public JsonParseException(String message, Throwable cause) {
super(message, cause);
}
}
| JsonParseException |
java | netty__netty | codec-socks/src/main/java/io/netty/handler/codec/socks/SocksCmdResponse.java | {
"start": 977,
"end": 6868
} | class ____ extends SocksResponse {
private final SocksCmdStatus cmdStatus;
private final SocksAddressType addressType;
private final String host;
private final int port;
// All arrays are initialized on construction time to 0/false/null remove array Initialization
private static final byte[] DOMAIN_ZEROED = {0x00};
private static final byte[] IPv4_HOSTNAME_ZEROED = {0x00, 0x00, 0x00, 0x00};
private static final byte[] IPv6_HOSTNAME_ZEROED = {0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00};
public SocksCmdResponse(SocksCmdStatus cmdStatus, SocksAddressType addressType) {
this(cmdStatus, addressType, null, 0);
}
/**
* Constructs new response and includes provided host and port as part of it.
*
* @param cmdStatus status of the response
* @param addressType type of host parameter
* @param host host (BND.ADDR field) is address that server used when connecting to the target host.
* When null a value of 4/8 0x00 octets will be used for IPv4/IPv6 and a single 0x00 byte will be
* used for domain addressType. Value is converted to ASCII using {@link IDN#toASCII(String)}.
* @param port port (BND.PORT field) that the server assigned to connect to the target host
* @throws NullPointerException in case cmdStatus or addressType are missing
* @throws IllegalArgumentException in case host or port cannot be validated
* @see IDN#toASCII(String)
*/
public SocksCmdResponse(SocksCmdStatus cmdStatus, SocksAddressType addressType, String host, int port) {
super(SocksResponseType.CMD);
ObjectUtil.checkNotNull(cmdStatus, "cmdStatus");
ObjectUtil.checkNotNull(addressType, "addressType");
if (host != null) {
switch (addressType) {
case IPv4:
if (!NetUtil.isValidIpV4Address(host)) {
throw new IllegalArgumentException(host + " is not a valid IPv4 address");
}
break;
case DOMAIN:
String asciiHost = IDN.toASCII(host);
if (asciiHost.length() > 255) {
throw new IllegalArgumentException(host + " IDN: " + asciiHost + " exceeds 255 char limit");
}
host = asciiHost;
break;
case IPv6:
if (!NetUtil.isValidIpV6Address(host)) {
throw new IllegalArgumentException(host + " is not a valid IPv6 address");
}
break;
case UNKNOWN:
break;
}
}
if (port < 0 || port > 65535) {
throw new IllegalArgumentException(port + " is not in bounds 0 <= x <= 65535");
}
this.cmdStatus = cmdStatus;
this.addressType = addressType;
this.host = host;
this.port = port;
}
/**
* Returns the {@link SocksCmdStatus} of this {@link SocksCmdResponse}
*
* @return The {@link SocksCmdStatus} of this {@link SocksCmdResponse}
*/
public SocksCmdStatus cmdStatus() {
return cmdStatus;
}
/**
* Returns the {@link SocksAddressType} of this {@link SocksCmdResponse}
*
* @return The {@link SocksAddressType} of this {@link SocksCmdResponse}
*/
public SocksAddressType addressType() {
return addressType;
}
/**
* Returns host that is used as a parameter in {@link SocksCmdType}.
* Host (BND.ADDR field in response) is address that server used when connecting to the target host.
* This is typically different from address which client uses to connect to the SOCKS server.
*
* @return host that is used as a parameter in {@link SocksCmdType}
* or null when there was no host specified during response construction
*/
public String host() {
return host != null && addressType == SocksAddressType.DOMAIN ? IDN.toUnicode(host) : host;
}
/**
* Returns port that is used as a parameter in {@link SocksCmdType}.
* Port (BND.PORT field in response) is port that the server assigned to connect to the target host.
*
* @return port that is used as a parameter in {@link SocksCmdType}
*/
public int port() {
return port;
}
@Override
public void encodeAsByteBuf(ByteBuf byteBuf) {
byteBuf.writeByte(protocolVersion().byteValue());
byteBuf.writeByte(cmdStatus.byteValue());
byteBuf.writeByte(0x00);
byteBuf.writeByte(addressType.byteValue());
switch (addressType) {
case IPv4: {
byte[] hostContent = host == null ?
IPv4_HOSTNAME_ZEROED : NetUtil.createByteArrayFromIpAddressString(host);
byteBuf.writeBytes(hostContent);
ByteBufUtil.writeShortBE(byteBuf, port);
break;
}
case DOMAIN: {
if (host != null) {
byteBuf.writeByte(host.length());
byteBuf.writeCharSequence(host, CharsetUtil.US_ASCII);
} else {
byteBuf.writeByte(DOMAIN_ZEROED.length);
byteBuf.writeBytes(DOMAIN_ZEROED);
}
ByteBufUtil.writeShortBE(byteBuf, port);
break;
}
case IPv6: {
byte[] hostContent = host == null
? IPv6_HOSTNAME_ZEROED : NetUtil.createByteArrayFromIpAddressString(host);
byteBuf.writeBytes(hostContent);
ByteBufUtil.writeShortBE(byteBuf, port);
break;
}
}
}
}
| SocksCmdResponse |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/writer/FileWriterBucket.java | {
"start": 2475,
"end": 13029
} | class ____<IN> {
private static final Logger LOG = LoggerFactory.getLogger(FileWriterBucket.class);
private final String bucketId;
private final Path bucketPath;
private final BucketWriter<IN, String> bucketWriter;
private final RollingPolicy<IN, String> rollingPolicy;
private final OutputFileConfig outputFileConfig;
private final String uniqueId;
private final List<InProgressFileWriter.PendingFileRecoverable> pendingFiles =
new ArrayList<>();
private long partCounter;
@Nullable private InProgressFileRecoverable inProgressFileToCleanup;
@Nullable private InProgressFileWriter<IN, String> inProgressPart;
/** Constructor to create a new empty bucket. */
private FileWriterBucket(
String bucketId,
Path bucketPath,
BucketWriter<IN, String> bucketWriter,
RollingPolicy<IN, String> rollingPolicy,
OutputFileConfig outputFileConfig) {
this.bucketId = checkNotNull(bucketId);
this.bucketPath = checkNotNull(bucketPath);
this.bucketWriter = checkNotNull(bucketWriter);
this.rollingPolicy = checkNotNull(rollingPolicy);
this.outputFileConfig = checkNotNull(outputFileConfig);
this.uniqueId = UUID.randomUUID().toString();
this.partCounter = 0;
}
/** Constructor to restore a bucket from checkpointed state. */
private FileWriterBucket(
BucketWriter<IN, String> partFileFactory,
RollingPolicy<IN, String> rollingPolicy,
FileWriterBucketState bucketState,
OutputFileConfig outputFileConfig)
throws IOException {
this(
bucketState.getBucketId(),
bucketState.getBucketPath(),
partFileFactory,
rollingPolicy,
outputFileConfig);
restoreInProgressFile(bucketState);
// Restore pending files, this only make difference if we are
// migrating from {@code StreamingFileSink}.
cacheRecoveredPendingFiles(bucketState);
}
private void restoreInProgressFile(FileWriterBucketState state) throws IOException {
if (!state.hasInProgressFileRecoverable()) {
return;
}
// we try to resume the previous in-progress file
InProgressFileWriter.InProgressFileRecoverable inProgressFileRecoverable =
state.getInProgressFileRecoverable();
if (bucketWriter.getProperties().supportsResume()) {
inProgressPart =
bucketWriter.resumeInProgressFileFrom(
bucketId,
inProgressFileRecoverable,
state.getInProgressFileCreationTime());
} else {
pendingFiles.add(inProgressFileRecoverable);
}
}
private void cacheRecoveredPendingFiles(FileWriterBucketState state) {
// Cache the previous pending files and send to committer on the first prepareCommit
// operation.
for (List<InProgressFileWriter.PendingFileRecoverable> restoredPendingRecoverables :
state.getPendingFileRecoverablesPerCheckpoint().values()) {
pendingFiles.addAll(restoredPendingRecoverables);
}
}
public String getBucketId() {
return bucketId;
}
public Path getBucketPath() {
return bucketPath;
}
public long getPartCounter() {
return partCounter;
}
public boolean isActive() {
return inProgressPart != null || inProgressFileToCleanup != null || pendingFiles.size() > 0;
}
void merge(final FileWriterBucket<IN> bucket) throws IOException {
checkNotNull(bucket);
checkState(Objects.equals(bucket.bucketPath, bucketPath));
bucket.closePartFile();
pendingFiles.addAll(bucket.pendingFiles);
if (LOG.isDebugEnabled()) {
LOG.debug("Merging buckets for bucket id={}", bucketId);
}
}
void write(IN element, long currentTime) throws IOException {
if (inProgressPart == null || rollingPolicy.shouldRollOnEvent(inProgressPart, element)) {
if (LOG.isDebugEnabled()) {
LOG.debug(
"Opening new part file for bucket id={} due to element {}.",
bucketId,
element);
}
inProgressPart = rollPartFile(currentTime);
}
inProgressPart.write(element, currentTime);
}
List<FileSinkCommittable> prepareCommit(boolean endOfInput) throws IOException {
if (inProgressPart != null
&& (rollingPolicy.shouldRollOnCheckpoint(inProgressPart) || endOfInput)) {
if (LOG.isDebugEnabled()) {
LOG.debug(
"Closing in-progress part file for bucket id={} on checkpoint.", bucketId);
}
closePartFile();
}
List<FileSinkCommittable> committables = new ArrayList<>();
pendingFiles.forEach(
pendingFile -> committables.add(new FileSinkCommittable(bucketId, pendingFile)));
pendingFiles.clear();
if (inProgressFileToCleanup != null) {
committables.add(new FileSinkCommittable(bucketId, inProgressFileToCleanup));
inProgressFileToCleanup = null;
}
return committables;
}
FileWriterBucketState snapshotState() throws IOException {
InProgressFileWriter.InProgressFileRecoverable inProgressFileRecoverable = null;
long inProgressFileCreationTime = Long.MAX_VALUE;
if (inProgressPart != null) {
inProgressFileRecoverable = inProgressPart.persist();
inProgressFileToCleanup = inProgressFileRecoverable;
inProgressFileCreationTime = inProgressPart.getCreationTime();
}
return new FileWriterBucketState(
bucketId, bucketPath, inProgressFileCreationTime, inProgressFileRecoverable);
}
void onProcessingTime(long timestamp) throws IOException {
if (inProgressPart != null
&& rollingPolicy.shouldRollOnProcessingTime(inProgressPart, timestamp)) {
if (LOG.isDebugEnabled()) {
LOG.debug(
"Bucket {} closing in-progress part file for part file id={} due to processing time rolling policy "
+ "(in-progress file created @ {}, last updated @ {} and current time is {}).",
bucketId,
uniqueId,
inProgressPart.getCreationTime(),
inProgressPart.getLastUpdateTime(),
timestamp);
}
closePartFile();
}
}
private InProgressFileWriter<IN, String> rollPartFile(long currentTime) throws IOException {
closePartFile();
final Path partFilePath = assembleNewPartPath();
if (LOG.isDebugEnabled()) {
LOG.debug(
"Opening new part file \"{}\" for bucket id={}.",
partFilePath.getName(),
bucketId);
}
return bucketWriter.openNewInProgressFile(bucketId, partFilePath, currentTime);
}
/** Constructor a new PartPath and increment the partCounter. */
private Path assembleNewPartPath() {
long currentPartCounter = partCounter++;
return new Path(
bucketPath,
outputFileConfig.getPartPrefix()
+ '-'
+ uniqueId
+ '-'
+ currentPartCounter
+ outputFileConfig.getPartSuffix());
}
private void closePartFile() throws IOException {
if (inProgressPart != null) {
InProgressFileWriter.PendingFileRecoverable pendingFileRecoverable =
inProgressPart.closeForCommit();
pendingFiles.add(pendingFileRecoverable);
inProgressPart = null;
}
}
void disposePartFile() {
if (inProgressPart != null) {
inProgressPart.dispose();
}
}
// --------------------------- Testing Methods -----------------------------
@VisibleForTesting
public String getUniqueId() {
return uniqueId;
}
@Nullable
@VisibleForTesting
InProgressFileWriter<IN, String> getInProgressPart() {
return inProgressPart;
}
@VisibleForTesting
public List<InProgressFileWriter.PendingFileRecoverable> getPendingFiles() {
return pendingFiles;
}
// --------------------------- Static Factory Methods -----------------------------
/**
* Creates a new empty {@code Bucket}.
*
* @param bucketId the identifier of the bucket, as returned by the {@link BucketAssigner}.
* @param bucketPath the path to where the part files for the bucket will be written to.
* @param bucketWriter the {@link BucketWriter} used to write part files in the bucket.
* @param <IN> the type of input elements to the sink.
* @param outputFileConfig the part file configuration.
* @return The new Bucket.
*/
static <IN> FileWriterBucket<IN> getNew(
final String bucketId,
final Path bucketPath,
final BucketWriter<IN, String> bucketWriter,
final RollingPolicy<IN, String> rollingPolicy,
final OutputFileConfig outputFileConfig) {
return new FileWriterBucket<>(
bucketId, bucketPath, bucketWriter, rollingPolicy, outputFileConfig);
}
/**
* Restores a {@code Bucket} from the state included in the provided {@link
* FileWriterBucketState}.
*
* @param bucketWriter the {@link BucketWriter} used to write part files in the bucket.
* @param bucketState the initial state of the restored bucket.
* @param <IN> the type of input elements to the sink.
* @param outputFileConfig the part file configuration.
* @return The restored Bucket.
*/
static <IN> FileWriterBucket<IN> restore(
final BucketWriter<IN, String> bucketWriter,
final RollingPolicy<IN, String> rollingPolicy,
final FileWriterBucketState bucketState,
final OutputFileConfig outputFileConfig)
throws IOException {
return new FileWriterBucket<>(bucketWriter, rollingPolicy, bucketState, outputFileConfig);
}
}
| FileWriterBucket |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java | {
"start": 3004,
"end": 36800
} | class ____ extends ESTestCase {
private IndexNameExpressionResolver resolver;
private DiscoveryNodes nodes;
private ClusterState clusterState;
private PersistentTasksCustomMetadata tasks;
private MlMetadata mlMetadata;
@Before
public void init() {
resolver = TestIndexNameExpressionResolver.newInstance();
nodes = DiscoveryNodes.builder()
.add(
DiscoveryNodeUtils.create(
"node_name",
"node_id",
new TransportAddress(InetAddress.getLoopbackAddress(), 9300),
Collections.emptyMap(),
Collections.emptySet()
)
)
.build();
mlMetadata = new MlMetadata.Builder().build();
}
public void testSelectNode_GivenJobIsOpened() {
Job job = createScheduledJob("job_id").build(new Date());
DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo"));
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder);
tasks = tasksBuilder.build();
givenClusterState("foo", 1, 0);
PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector(
clusterState,
resolver,
df.getId(),
df.getJobId(),
df.getIndices(),
SearchRequest.DEFAULT_INDICES_OPTIONS
).selectNode(makeCandidateNodes("node_id", "other_node_id"));
assertEquals("node_id", result.getExecutorNode());
new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices(), SearchRequest.DEFAULT_INDICES_OPTIONS)
.checkDatafeedTaskCanBeCreated();
}
public void testSelectNode_GivenJobIsOpenedAndDataStream() {
Job job = createScheduledJob("job_id").build(new Date());
DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo"));
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder);
tasks = tasksBuilder.build();
givenClusterStateWithDatastream("foo", 1, 0, Collections.singletonList(new Tuple<>(0, ShardRoutingState.STARTED)));
PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector(
clusterState,
resolver,
df.getId(),
df.getJobId(),
df.getIndices(),
SearchRequest.DEFAULT_INDICES_OPTIONS
).selectNode(makeCandidateNodes("node_id", "other_node_id"));
assertEquals("node_id", result.getExecutorNode());
new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices(), SearchRequest.DEFAULT_INDICES_OPTIONS)
.checkDatafeedTaskCanBeCreated();
}
/**
 * A job task whose state is still {@code null} (job opening) must not block the
 * datafeed: it is routed to the job's node and the task can be created.
 */
public void testSelectNode_GivenJobIsOpening() {
    Job anomalyJob = createScheduledJob("job_id").build(new Date());
    DatafeedConfig datafeed = createDatafeed("datafeed_id", anomalyJob.getId(), Collections.singletonList("foo"));

    // A null JobState on the persistent task represents a job that is still opening.
    PersistentTasksCustomMetadata.Builder metaBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(anomalyJob.getId(), "node_id", null, metaBuilder);
    tasks = metaBuilder.build();

    givenClusterState("foo", 1, 0);

    PersistentTasksCustomMetadata.Assignment assignment = new DatafeedNodeSelector(
        clusterState,
        resolver,
        datafeed.getId(),
        datafeed.getJobId(),
        datafeed.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(makeCandidateNodes("node_id", "other_node_id"));
    assertEquals("node_id", assignment.getExecutorNode());

    new DatafeedNodeSelector(clusterState, resolver, datafeed.getId(), datafeed.getJobId(), datafeed.getIndices(), SearchRequest.DEFAULT_INDICES_OPTIONS)
        .checkDatafeedTaskCanBeCreated();
}
/**
 * No persistent task exists for the job at all, so its state is reported as
 * [closed]. Node selection must return no executor (with an explanation) and
 * {@code checkDatafeedTaskCanBeCreated} must throw.
 */
public void testNoJobTask() {
    Job job = createScheduledJob("job_id").build(new Date());
    // Using wildcard index name to test for index resolving as well
    DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*"));
    // Empty persistent-task metadata: the job has no task, i.e. it is closed.
    tasks = PersistentTasksCustomMetadata.builder().build();
    givenClusterState("foo", 1, 0);
    PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector(
        clusterState,
        resolver,
        df.getId(),
        df.getJobId(),
        df.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(makeCandidateNodes("node_id", "other_node_id"));
    assertNull(result.getExecutorNode());
    assertThat(
        result.getExplanation(),
        equalTo(
            "cannot start datafeed [datafeed_id], because the job's [job_id] state is " + "[closed] while state [opened] is required"
        )
    );
    // A datafeed for a closed job must not even be creatable.
    ElasticsearchException e = expectThrows(
        ElasticsearchException.class,
        () -> new DatafeedNodeSelector(
            clusterState,
            resolver,
            df.getId(),
            df.getJobId(),
            df.getIndices(),
            SearchRequest.DEFAULT_INDICES_OPTIONS
        ).checkDatafeedTaskCanBeCreated()
    );
    assertThat(
        e.getMessage(),
        containsString(
            "No node found to start datafeed [datafeed_id], allocation explanation "
                + "[cannot start datafeed [datafeed_id], because the job's [job_id] state is [closed] while state [opened] is required]"
        )
    );
}
/**
 * The job task exists but is in a terminal state (FAILED or CLOSED, chosen at
 * random). The datafeed must not be assigned and task creation must throw,
 * with the actual job state echoed in the explanation.
 */
public void testSelectNode_GivenJobFailedOrClosed() {
    Job job = createScheduledJob("job_id").build(new Date());
    DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo"));
    PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
    // Either terminal state behaves identically for datafeed assignment.
    JobState jobState = randomFrom(JobState.FAILED, JobState.CLOSED);
    addJobTask(job.getId(), "node_id", jobState, tasksBuilder);
    tasks = tasksBuilder.build();
    givenClusterState("foo", 1, 0);
    PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector(
        clusterState,
        resolver,
        df.getId(),
        df.getJobId(),
        df.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(makeCandidateNodes("node_id", "other_node_id"));
    assertNull(result.getExecutorNode());
    assertEquals(
        "cannot start datafeed [datafeed_id], because the job's [job_id] state is [" + jobState + "] while state [opened] is required",
        result.getExplanation()
    );
    ElasticsearchException e = expectThrows(
        ElasticsearchException.class,
        () -> new DatafeedNodeSelector(
            clusterState,
            resolver,
            df.getId(),
            df.getJobId(),
            df.getIndices(),
            SearchRequest.DEFAULT_INDICES_OPTIONS
        ).checkDatafeedTaskCanBeCreated()
    );
    assertThat(
        e.getMessage(),
        containsString(
            "No node found to start datafeed [datafeed_id], allocation explanation "
                + "[cannot start datafeed [datafeed_id], because the job's [job_id] state is ["
                + jobState
                + "] while state [opened] is required]"
        )
    );
}
/**
 * The resolved index's only primary shard is unassigned: no node may be
 * selected (with an explanation), yet the datafeed task itself may still be
 * created — it simply waits for the shards to become active.
 */
public void testShardUnassigned() {
    Job anomalyJob = createScheduledJob("job_id").build(new Date());
    // Using wildcard index name to test for index resolving as well
    DatafeedConfig datafeed = createDatafeed("datafeed_id", anomalyJob.getId(), Collections.singletonList("fo*"));

    PersistentTasksCustomMetadata.Builder metaBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(anomalyJob.getId(), "node_id", JobState.OPENED, metaBuilder);
    tasks = metaBuilder.build();

    // Shard 0 of "foo" is UNASSIGNED.
    givenClusterState("foo", 1, 0, Collections.singletonList(new Tuple<>(0, ShardRoutingState.UNASSIGNED)));

    PersistentTasksCustomMetadata.Assignment assignment = new DatafeedNodeSelector(
        clusterState,
        resolver,
        datafeed.getId(),
        datafeed.getJobId(),
        datafeed.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(makeCandidateNodes("node_id", "other_node_id"));
    assertNull(assignment.getExecutorNode());
    assertThat(
        assignment.getExplanation(),
        equalTo("cannot start datafeed [datafeed_id] because index [foo] " + "does not have all primary shards active yet.")
    );

    // Unlike the closed-job case, inactive shards do not prevent task creation.
    new DatafeedNodeSelector(clusterState, resolver, datafeed.getId(), datafeed.getJobId(), datafeed.getIndices(), SearchRequest.DEFAULT_INDICES_OPTIONS)
        .checkDatafeedTaskCanBeCreated();
}
/**
 * One of two primary shards is still INITIALIZING: no node may be selected
 * (with an explanation), but the datafeed task can still be created and will
 * wait for the shards to become active.
 */
public void testShardNotAllActive() {
    Job anomalyJob = createScheduledJob("job_id").build(new Date());
    // Using wildcard index name to test for index resolving as well
    DatafeedConfig datafeed = createDatafeed("datafeed_id", anomalyJob.getId(), Collections.singletonList("fo*"));

    PersistentTasksCustomMetadata.Builder metaBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(anomalyJob.getId(), "node_id", JobState.OPENED, metaBuilder);
    tasks = metaBuilder.build();

    // Shard 0 is started but shard 1 is still initializing.
    givenClusterState(
        "foo",
        2,
        0,
        Arrays.asList(new Tuple<>(0, ShardRoutingState.STARTED), new Tuple<>(1, ShardRoutingState.INITIALIZING))
    );

    PersistentTasksCustomMetadata.Assignment assignment = new DatafeedNodeSelector(
        clusterState,
        resolver,
        datafeed.getId(),
        datafeed.getJobId(),
        datafeed.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(makeCandidateNodes("node_id", "other_node_id"));
    assertNull(assignment.getExecutorNode());
    assertThat(
        assignment.getExplanation(),
        equalTo("cannot start datafeed [datafeed_id] because index [foo] " + "does not have all primary shards active yet.")
    );

    new DatafeedNodeSelector(clusterState, resolver, datafeed.getId(), datafeed.getJobId(), datafeed.getIndices(), SearchRequest.DEFAULT_INDICES_OPTIONS)
        .checkDatafeedTaskCanBeCreated();
}
/**
 * The datafeed's concrete index cannot be resolved at all: assignment fails
 * with a resolution error and task creation throws. The expectations accept
 * two message variants because the rendered IndicesOptions differ depending on
 * whether the failure-store options are present.
 */
public void testIndexDoesntExist() {
    Job job = createScheduledJob("job_id").build(new Date());
    // The datafeed points at "not_foo" but only "foo" exists in the cluster state.
    DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo"));
    PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder);
    tasks = tasksBuilder.build();
    givenClusterState("foo", 1, 0);
    PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector(
        clusterState,
        resolver,
        df.getId(),
        df.getJobId(),
        df.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(makeCandidateNodes("node_id", "other_node_id"));
    assertNull(result.getExecutorNode());
    assertThat(
        result.getExplanation(),
        anyOf(
            // TODO remove this first option and only allow the second once the failure store functionality is permanently switched on
            equalTo(
                "cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and "
                    + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, "
                    + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, "
                    + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true]] "
                    + "with exception [no such index [not_foo]]"
            ),
            equalTo(
                "cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and "
                    + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, "
                    + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, "
                    + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true, "
                    + "allow_selectors=true, include_failure_indices=false, resolve_cross_project_index_expression=false]] "
                    + "with exception [no such index [not_foo]]"
            )
        )
    );
    // A resolution failure is fatal: the datafeed task must not be creatable.
    ElasticsearchException e = expectThrows(
        ElasticsearchException.class,
        () -> new DatafeedNodeSelector(
            clusterState,
            resolver,
            df.getId(),
            df.getJobId(),
            df.getIndices(),
            SearchRequest.DEFAULT_INDICES_OPTIONS
        ).checkDatafeedTaskCanBeCreated()
    );
    assertThat(
        e.getMessage(),
        anyOf(
            // TODO remove this first option and only allow the second once the failure store functionality is permanently switched on
            containsString(
                "No node found to start datafeed [datafeed_id], allocation explanation "
                    + "[cannot start datafeed [datafeed_id] because it failed resolving "
                    + "indices given [not_foo] and indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, "
                    + "expand_wildcards_open=true, expand_wildcards_closed=false, expand_wildcards_hidden=false, "
                    + "allow_aliases_to_multiple_indices=true, forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true"
                    + "]] with exception [no such index [not_foo]]]"
            ),
            containsString(
                "No node found to start datafeed [datafeed_id], allocation explanation "
                    + "[cannot start datafeed [datafeed_id] because it failed resolving "
                    + "indices given [not_foo] and indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, "
                    + "expand_wildcards_open=true, expand_wildcards_closed=false, expand_wildcards_hidden=false, "
                    + "allow_aliases_to_multiple_indices=true, forbid_closed_indices=true, ignore_aliases=false, "
                    + "ignore_throttled=true, allow_selectors=true, include_failure_indices=false,"
                    + " resolve_cross_project_index_expression=false]] with exception [no such index [not_foo]]]"
            )
        )
    );
}
/**
 * One of the datafeed's patterns matches nothing; as long as another pattern
 * resolves, assignment succeeds and the datafeed task can be created.
 */
public void testIndexPatternDoesntExist() {
    Job anomalyJob = createScheduledJob("job_id").build(new Date());
    // "missing-*" matches nothing, "foo*" matches the existing index.
    DatafeedConfig datafeed = createDatafeed("datafeed_id", anomalyJob.getId(), Arrays.asList("missing-*", "foo*"));

    PersistentTasksCustomMetadata.Builder metaBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(anomalyJob.getId(), "node_id", JobState.OPENED, metaBuilder);
    tasks = metaBuilder.build();

    givenClusterState("foo", 1, 0);

    PersistentTasksCustomMetadata.Assignment assignment = new DatafeedNodeSelector(
        clusterState,
        resolver,
        datafeed.getId(),
        datafeed.getJobId(),
        datafeed.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(makeCandidateNodes("node_id", "other_node_id"));
    assertEquals("node_id", assignment.getExecutorNode());

    new DatafeedNodeSelector(clusterState, resolver, datafeed.getId(), datafeed.getJobId(), datafeed.getIndices(), SearchRequest.DEFAULT_INDICES_OPTIONS)
        .checkDatafeedTaskCanBeCreated();
}
/**
 * A local pattern that matches nothing combined with a remote-cluster pattern
 * must not block assignment: remote patterns are not resolved locally, so the
 * datafeed lands on the job's node and can be created.
 */
public void testLocalIndexPatternWithoutMatchingIndicesAndRemoteIndexPattern() {
    Job anomalyJob = createScheduledJob("job_id").build(new Date());
    DatafeedConfig datafeed = createDatafeed("datafeed_id", anomalyJob.getId(), Arrays.asList("missing-*", "remote:index-*"));

    PersistentTasksCustomMetadata.Builder metaBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(anomalyJob.getId(), "node_id", JobState.OPENED, metaBuilder);
    tasks = metaBuilder.build();

    givenClusterState("foo", 1, 0);

    PersistentTasksCustomMetadata.Assignment assignment = new DatafeedNodeSelector(
        clusterState,
        resolver,
        datafeed.getId(),
        datafeed.getJobId(),
        datafeed.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(makeCandidateNodes("node_id", "other_node_id"));
    assertEquals("node_id", assignment.getExecutorNode());

    new DatafeedNodeSelector(clusterState, resolver, datafeed.getId(), datafeed.getJobId(), datafeed.getIndices(), SearchRequest.DEFAULT_INDICES_OPTIONS)
        .checkDatafeedTaskCanBeCreated();
}
/**
 * A purely remote index ("remote:foo") needs no local shard checks; the
 * selector must still produce an executor node.
 */
public void testRemoteIndex() {
    Job anomalyJob = createScheduledJob("job_id").build(new Date());
    DatafeedConfig datafeed = createDatafeed("datafeed_id", anomalyJob.getId(), Collections.singletonList("remote:foo"));

    PersistentTasksCustomMetadata.Builder metaBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(anomalyJob.getId(), "node_id", JobState.OPENED, metaBuilder);
    tasks = metaBuilder.build();

    givenClusterState("foo", 1, 0);

    PersistentTasksCustomMetadata.Assignment assignment = new DatafeedNodeSelector(
        clusterState,
        resolver,
        datafeed.getId(),
        datafeed.getJobId(),
        datafeed.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(makeCandidateNodes("node_id", "other_node_id"));
    assertNotNull(assignment.getExecutorNode());
}
/**
 * Two-phase scenario. Phase 1: the job task's allocation id is forced lower
 * than the task's, making the job state stale — assignment must fail and task
 * creation must throw. Phase 2: the task metadata is rebuilt with a fresh,
 * non-stale assignment on node_id1 — assignment then succeeds.
 */
public void testSelectNode_jobTaskStale() {
    Job job = createScheduledJob("job_id").build(new Date());
    DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo"));
    // Staleness is independent of whether the task currently has a node.
    String nodeId = randomBoolean() ? "node_id2" : null;
    PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(job.getId(), nodeId, JobState.OPENED, tasksBuilder);
    // Set to lower allocationId, so job task is stale:
    tasksBuilder.updateTaskState(MlTasks.jobTaskId(job.getId()), new JobTaskState(JobState.OPENED, 0, null, Instant.now()))));
    tasks = tasksBuilder.build();
    givenClusterState("foo", 1, 0);
    Collection<DiscoveryNode> candidateNodes = makeCandidateNodes("node_id1", "node_id2", "node_id3");
    PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector(
        clusterState,
        resolver,
        df.getId(),
        df.getJobId(),
        df.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(candidateNodes);
    assertNull(result.getExecutorNode());
    assertEquals("cannot start datafeed [datafeed_id], because the job's [job_id] state is stale", result.getExplanation());
    ElasticsearchException e = expectThrows(
        ElasticsearchException.class,
        () -> new DatafeedNodeSelector(
            clusterState,
            resolver,
            df.getId(),
            df.getJobId(),
            df.getIndices(),
            SearchRequest.DEFAULT_INDICES_OPTIONS
        ).checkDatafeedTaskCanBeCreated()
    );
    assertThat(
        e.getMessage(),
        containsString(
            "No node found to start datafeed [datafeed_id], allocation explanation "
                + "[cannot start datafeed [datafeed_id], because the job's [job_id] state is stale]"
        )
    );
    // Phase 2: rebuild the task metadata with a fresh (non-stale) job task.
    tasksBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(job.getId(), "node_id1", JobState.OPENED, tasksBuilder);
    tasks = tasksBuilder.build();
    givenClusterState("foo", 1, 0);
    result = new DatafeedNodeSelector(
        clusterState,
        resolver,
        df.getId(),
        df.getJobId(),
        df.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(candidateNodes);
    assertEquals("node_id1", result.getExecutorNode());
    new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices(), SearchRequest.DEFAULT_INDICES_OPTIONS)
        .checkDatafeedTaskCanBeCreated();
}
/**
 * Two problems at once: the job is merely OPENING (non-critical) and the index
 * does not exist (critical). The critical index-resolution failure must win
 * and be the one reported. Two message variants are accepted because the
 * rendered IndicesOptions differ with the failure-store options.
 */
public void testSelectNode_GivenJobOpeningAndIndexDoesNotExist() {
    // Here we test that when there are 2 problems, the most critical gets reported first.
    // In this case job is Opening (non-critical) and the index does not exist (critical)
    Job job = createScheduledJob("job_id").build(new Date());
    DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo"));
    PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(job.getId(), "node_id", JobState.OPENING, tasksBuilder);
    tasks = tasksBuilder.build();
    givenClusterState("foo", 1, 0);
    ElasticsearchException e = expectThrows(
        ElasticsearchException.class,
        () -> new DatafeedNodeSelector(
            clusterState,
            resolver,
            df.getId(),
            df.getJobId(),
            df.getIndices(),
            SearchRequest.DEFAULT_INDICES_OPTIONS
        ).checkDatafeedTaskCanBeCreated()
    );
    assertThat(
        e.getMessage(),
        anyOf(
            // TODO remove this first option and only allow the second once the failure store functionality is permanently switched on
            containsString(
                "No node found to start datafeed [datafeed_id], allocation explanation "
                    + "[cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and "
                    + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, "
                    + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, "
                    + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true]] "
                    + "with exception [no such index [not_foo]]]"
            ),
            containsString(
                "No node found to start datafeed [datafeed_id], allocation explanation "
                    + "[cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and "
                    + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, "
                    + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, "
                    + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true, "
                    + "allow_selectors=true, include_failure_indices=false, resolve_cross_project_index_expression=false]]"
                    + " with exception [no such index [not_foo]]]"
            )
        )
    );
}
/**
 * While ML upgrade mode is enabled no datafeed may be assigned; the canned
 * {@code AWAITING_UPGRADE} assignment must be returned unchanged.
 */
public void testSelectNode_GivenMlUpgradeMode() {
    Job anomalyJob = createScheduledJob("job_id").build(new Date());
    DatafeedConfig datafeed = createDatafeed("datafeed_id", anomalyJob.getId(), Collections.singletonList("foo"));

    PersistentTasksCustomMetadata.Builder metaBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(anomalyJob.getId(), "node_id", JobState.OPENED, metaBuilder);
    tasks = metaBuilder.build();

    // Flip the cluster into ML upgrade mode before building the state.
    mlMetadata = new MlMetadata.Builder().isUpgradeMode(true).build();
    givenClusterState("foo", 1, 0);

    PersistentTasksCustomMetadata.Assignment assignment = new DatafeedNodeSelector(
        clusterState,
        resolver,
        datafeed.getId(),
        datafeed.getJobId(),
        datafeed.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(makeCandidateNodes("node_id", "other_node_id"));
    assertThat(assignment, equalTo(MlTasks.AWAITING_UPGRADE));
}
/**
 * While an ML feature reset is in progress no datafeed may be assigned; the
 * canned {@code RESET_IN_PROGRESS} assignment must be returned unchanged.
 */
public void testSelectNode_GivenResetInProgress() {
    Job anomalyJob = createScheduledJob("job_id").build(new Date());
    DatafeedConfig datafeed = createDatafeed("datafeed_id", anomalyJob.getId(), Collections.singletonList("foo"));

    PersistentTasksCustomMetadata.Builder metaBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(anomalyJob.getId(), "node_id", JobState.OPENED, metaBuilder);
    tasks = metaBuilder.build();

    // Flip the cluster into ML reset mode before building the state.
    mlMetadata = new MlMetadata.Builder().isResetMode(true).build();
    givenClusterState("foo", 1, 0);

    PersistentTasksCustomMetadata.Assignment assignment = new DatafeedNodeSelector(
        clusterState,
        resolver,
        datafeed.getId(),
        datafeed.getJobId(),
        datafeed.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(makeCandidateNodes("node_id", "other_node_id"));
    assertThat(assignment, equalTo(MlTasks.RESET_IN_PROGRESS));
}
/**
 * In ML upgrade mode even creating the datafeed task is rejected outright,
 * with a dedicated error message.
 */
public void testCheckDatafeedTaskCanBeCreated_GivenMlUpgradeMode() {
    Job anomalyJob = createScheduledJob("job_id").build(new Date());
    DatafeedConfig datafeed = createDatafeed("datafeed_id", anomalyJob.getId(), Collections.singletonList("foo"));

    PersistentTasksCustomMetadata.Builder metaBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(anomalyJob.getId(), "node_id", JobState.OPENED, metaBuilder);
    tasks = metaBuilder.build();

    mlMetadata = new MlMetadata.Builder().isUpgradeMode(true).build();
    givenClusterState("foo", 1, 0);

    ElasticsearchException e = expectThrows(
        ElasticsearchException.class,
        () -> new DatafeedNodeSelector(
            clusterState,
            resolver,
            datafeed.getId(),
            datafeed.getJobId(),
            datafeed.getIndices(),
            SearchRequest.DEFAULT_INDICES_OPTIONS
        ).checkDatafeedTaskCanBeCreated()
    );
    assertThat(e.getMessage(), equalTo("Could not start datafeed [datafeed_id] as indices are being upgraded"));
}
/**
 * The job's node is absent from the candidate set (e.g. shutting down):
 * selection yields no executor and an "awaiting relocation" explanation, yet —
 * unlike the error cases — the datafeed task may still be created.
 */
public void testSelectNode_GivenJobIsOpenedAndNodeIsShuttingDown() {
    Job anomalyJob = createScheduledJob("job_id").build(new Date());
    DatafeedConfig datafeed = createDatafeed("datafeed_id", anomalyJob.getId(), Collections.singletonList("foo"));

    PersistentTasksCustomMetadata.Builder metaBuilder = PersistentTasksCustomMetadata.builder();
    addJobTask(anomalyJob.getId(), "node_id", JobState.OPENED, metaBuilder);
    tasks = metaBuilder.build();

    givenClusterState("foo", 1, 0);

    // "node_id" (where the job runs) is deliberately missing from the candidates.
    PersistentTasksCustomMetadata.Assignment assignment = new DatafeedNodeSelector(
        clusterState,
        resolver,
        datafeed.getId(),
        datafeed.getJobId(),
        datafeed.getIndices(),
        SearchRequest.DEFAULT_INDICES_OPTIONS
    ).selectNode(makeCandidateNodes("other_node_id"));
    assertNull(assignment.getExecutorNode());
    assertEquals("datafeed awaiting job relocation.", assignment.getExplanation());

    // This is different to the pattern of the other tests - we allow the datafeed task to be
    // created even though it cannot be assigned. The reason is that it would be perverse for
    // start datafeed to throw an error just because a user was unlucky and opened a job just
    // before a node got shut down, such that their subsequent call to start its datafeed arrived
    // after that node was shutting down.
    new DatafeedNodeSelector(clusterState, resolver, datafeed.getId(), datafeed.getJobId(), datafeed.getIndices(), SearchRequest.DEFAULT_INDICES_OPTIONS)
        .checkDatafeedTaskCanBeCreated();
}
/** Convenience overload: a single primary shard (id 0) in the STARTED state. */
private void givenClusterState(String index, int numberOfShards, int numberOfReplicas) {
    givenClusterState(index, numberOfShards, numberOfReplicas, Collections.singletonList(new Tuple<>(0, ShardRoutingState.STARTED)));
}
/**
 * Installs a cluster state (into the {@code clusterState} field) containing
 * the given index, the current {@code tasks} and {@code mlMetadata}, the test
 * {@code nodes}, and a routing table shaped by {@code states}.
 */
private void givenClusterState(String index, int numberOfShards, int numberOfReplicas, List<Tuple<Integer, ShardRoutingState>> states) {
    IndexMetadata indexMeta = IndexMetadata.builder(index)
        .settings(settings(IndexVersion.current()))
        .numberOfShards(numberOfShards)
        .numberOfReplicas(numberOfReplicas)
        .build();
    Metadata.Builder metadata = new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasks)
        .putCustom(MlMetadata.TYPE, mlMetadata)
        .put(indexMeta, false);
    clusterState = ClusterState.builder(new ClusterName("cluster_name"))
        .metadata(metadata)
        .nodes(nodes)
        .routingTable(generateRoutingTable(indexMeta, states))
        .build();
}
/**
 * Like {@code givenClusterState(String, int, int, List)} but registers the
 * index as the first backing index of a data stream named {@code dataStreamName}.
 */
private void givenClusterStateWithDatastream(
    String dataStreamName,
    int numberOfShards,
    int numberOfReplicas,
    List<Tuple<Integer, ShardRoutingState>> states
) {
    Index backingIndex = new Index(getDefaultBackingIndexName(dataStreamName, 1), INDEX_UUID_NA_VALUE);
    IndexMetadata indexMeta = IndexMetadata.builder(backingIndex.getName())
        .settings(settings(IndexVersion.current()))
        .numberOfShards(numberOfShards)
        .numberOfReplicas(numberOfReplicas)
        .build();
    Metadata.Builder metadata = new Metadata.Builder().put(
        DataStreamTestHelper.newInstance(dataStreamName, Collections.singletonList(backingIndex))
    ).putCustom(PersistentTasksCustomMetadata.TYPE, tasks).putCustom(MlMetadata.TYPE, mlMetadata).put(indexMeta, false);
    clusterState = ClusterState.builder(new ClusterName("cluster_name"))
        .metadata(metadata)
        .nodes(nodes)
        .routingTable(generateRoutingTable(indexMeta, states))
        .build();
}
/**
 * Builds a routing table for {@code indexMetadata} with one shard per entry in
 * {@code states}. Each tuple carries the ordinal of the node the shard lives
 * on (v1) and the routing state to simulate (v2); shard ids are assigned
 * sequentially in list order. A RELOCATING shard gets a relocation target on
 * the next node (modulo 3); any state other than STARTED/INITIALIZING/
 * RELOCATING yields an unassigned shard.
 */
private static RoutingTable generateRoutingTable(IndexMetadata indexMetadata, List<Tuple<Integer, ShardRoutingState>> states) {
    IndexRoutingTable.Builder rtBuilder = IndexRoutingTable.builder(indexMetadata.getIndex());
    final String index = indexMetadata.getIndex().getName();
    int counter = 0;
    for (Tuple<Integer, ShardRoutingState> state : states) {
        ShardId shardId = new ShardId(index, "_na_", counter);
        // Node names follow the "node_<ordinal>" convention used by the tests.
        String currentNode = "node_" + state.v1();
        // Exhaustive dispatch on the simulated routing state (default: unassigned).
        ShardRouting shardRouting = switch (state.v2()) {
            case STARTED -> TestShardRouting.newShardRouting(index, shardId.getId(), currentNode, null, true, ShardRoutingState.STARTED);
            case INITIALIZING -> TestShardRouting.newShardRouting(
                index,
                shardId.getId(),
                currentNode,
                null,
                true,
                ShardRoutingState.INITIALIZING
            );
            case RELOCATING -> TestShardRouting.newShardRouting(
                index,
                shardId.getId(),
                currentNode,
                "node_" + ((state.v1() + 1) % 3), // relocation target: next node, wrapping at 3
                true,
                ShardRoutingState.RELOCATING
            );
            default -> ShardRouting.newUnassigned(
                shardId,
                true,
                RecoverySource.EmptyStoreRecoverySource.INSTANCE,
                new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""),
                ShardRouting.Role.DEFAULT
            );
        };
        IndexShardRoutingTable.Builder shardRTBuilder = new IndexShardRoutingTable.Builder(shardId);
        shardRTBuilder.addShard(shardRouting);
        rtBuilder.addIndexShard(shardRTBuilder);
        counter += 1;
    }
    return new RoutingTable.Builder().add(rtBuilder).build();
}
/**
 * Creates one {@code DiscoveryNode} per id, bound to the loopback address on
 * consecutive ports starting at 9300, with all roles enabled.
 */
Collection<DiscoveryNode> makeCandidateNodes(String... nodeIds) {
    List<DiscoveryNode> candidates = new ArrayList<>(nodeIds.length);
    for (int i = 0; i < nodeIds.length; i++) {
        candidates.add(
            DiscoveryNodeUtils.create(
                nodeIds[i] + "-name",
                nodeIds[i],
                new TransportAddress(InetAddress.getLoopbackAddress(), 9300 + i),
                Collections.emptyMap(),
                DiscoveryNodeRole.roles()
            )
        );
    }
    return candidates;
}
}
| DatafeedNodeSelectorTests |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/objectarray/ObjectArrayAssert_containsNull_Test.java | {
"start": 948,
"end": 1288
} | class ____ extends ObjectArrayAssertBaseTest {
@Override
protected ObjectArrayAssert<Object> invoke_api_method() {
return assertions.containsNull();
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertContainsNull(getInfo(assertions), getActual(assertions));
}
}
| ObjectArrayAssert_containsNull_Test |
java | spring-projects__spring-boot | integration-test/spring-boot-server-integration-tests/spring-boot-server-tests-app/src/main/java/com/example/ResourceHandlingApplication.java | {
"start": 2509,
"end": 3229
} | class ____ extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
collectResourcePaths("/").forEach(resp.getWriter()::println);
resp.getWriter().flush();
}
private Set<String> collectResourcePaths(String path) {
Set<String> allResourcePaths = new LinkedHashSet<>();
Set<String> pathsForPath = getServletContext().getResourcePaths(path);
if (pathsForPath != null) {
for (String resourcePath : pathsForPath) {
allResourcePaths.add(resourcePath);
allResourcePaths.addAll(collectResourcePaths(resourcePath));
}
}
return allResourcePaths;
}
}
private static final | GetResourcePathsServlet |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java | {
"start": 1280,
"end": 5310
} | class ____ {
private static final Random RANDOM = new Random();
public static MD5Hash getTestHash() throws Exception {
MessageDigest digest = MessageDigest.getInstance("MD5");
byte[] buffer = new byte[1024];
RANDOM.nextBytes(buffer);
digest.update(buffer);
return new MD5Hash(digest.digest());
}
protected static byte[] D00 = new byte[] {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
protected static byte[] DFF = new byte[] {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
@Test
public void testMD5Hash() throws Exception {
MD5Hash md5Hash = getTestHash();
final MD5Hash md5Hash00
= new MD5Hash(D00);
final MD5Hash md5HashFF
= new MD5Hash(DFF);
MD5Hash orderedHash = new MD5Hash(new byte[]{1,2,3,4,5,6,7,8,9,10,11,12,
13,14,15,16});
MD5Hash backwardHash = new MD5Hash(new byte[]{-1,-2,-3,-4,-5,-6,-7,-8,
-9,-10,-11,-12, -13, -14,
-15,-16});
MD5Hash closeHash1 = new MD5Hash(new byte[]{-1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0});
MD5Hash closeHash2 = new MD5Hash(new byte[]{-1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0});
// test i/o
TestWritable.testWritable(md5Hash);
TestWritable.testWritable(md5Hash00);
TestWritable.testWritable(md5HashFF);
// test equals()
assertEquals(md5Hash, md5Hash);
assertEquals(md5Hash00, md5Hash00);
assertEquals(md5HashFF, md5HashFF);
// test compareTo()
assertTrue(md5Hash.compareTo(md5Hash) == 0);
assertTrue(md5Hash00.compareTo(md5Hash) < 0);
assertTrue(md5HashFF.compareTo(md5Hash) > 0);
// test toString and string ctor
assertEquals(md5Hash, new MD5Hash(md5Hash.toString()));
assertEquals(md5Hash00, new MD5Hash(md5Hash00.toString()));
assertEquals(md5HashFF, new MD5Hash(md5HashFF.toString()));
assertEquals(0x01020304, orderedHash.quarterDigest());
assertEquals(0xfffefdfc, backwardHash.quarterDigest());
assertEquals(0x0102030405060708L, orderedHash.halfDigest());
assertEquals(0xfffefdfcfbfaf9f8L, backwardHash.halfDigest());
assertTrue(closeHash1.hashCode() != closeHash2.hashCode(),
"hash collision");
SubjectInheritingThread t1 = new SubjectInheritingThread() {
@Override
public void work() {
for (int i = 0; i < 100; i++) {
MD5Hash hash = new MD5Hash(DFF);
assertEquals(hash, md5HashFF);
}
}
};
SubjectInheritingThread t2 = new SubjectInheritingThread() {
@Override
public void work() {
for (int i = 0; i < 100; i++) {
MD5Hash hash = new MD5Hash(D00);
assertEquals(hash, md5Hash00);
}
}
};
t1.start();
t2.start();
t1.join();
t2.join();
}
@Test
public void testFactoryReturnsClearedHashes() throws IOException {
// A stream that will throw an IOE after reading some bytes
ByteArrayInputStream failingStream = new ByteArrayInputStream(
"xxxx".getBytes()) {
@Override
public synchronized int read(byte[] b) throws IOException {
int ret = super.read(b);
if (ret <= 0) {
throw new IOException("Injected fault");
}
return ret;
}
};
final String TEST_STRING = "hello";
// Calculate the correct digest for the test string
MD5Hash expectedHash = MD5Hash.digest(TEST_STRING);
// Hashing again should give the same result
assertEquals(expectedHash, MD5Hash.digest(TEST_STRING));
// Try to hash a stream which will fail halfway through
try {
MD5Hash.digest(failingStream);
fail("didnt throw!");
} catch (Exception e) {
// expected
}
// Make sure we get the same result
assertEquals(expectedHash, MD5Hash.digest(TEST_STRING));
}
}
| TestMD5Hash |
java | quarkusio__quarkus | independent-projects/qute/core/src/main/java/io/quarkus/qute/ParserHelper.java | {
"start": 475,
"end": 1194
} | class ____. The package name is optional for JDK types from the {@code java.lang}
* package. Parameterized types are supported, however wildcards are always ignored - only the upper/lower bound is taken
* into account. For example, the type info {@code java.util.List<? extends org.acme.Foo>} is recognized as
* {@code java.util.List<org.acme.Foo> list}. Type variables are not handled in a special way and should never be used.
*
* @param name
* @param type
*/
void addParameter(String name, String type);
/**
* The filter is used before the template contents is parsed.
*
* @param filter
*/
void addContentFilter(Function<String, String> filter);
}
| name |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/SnapshotDirectory.java | {
"start": 1197,
"end": 1689
} | class ____
* some method that simplify resource management when dealing with such directories, e.g. it can
* produce a {@link DirectoryStateHandle} when the snapshot is completed and disposal considers
* whether or not a snapshot was already completed. For a completed snapshot, the ownership for
* cleanup is transferred to the created directory state handle. For incomplete snapshots, calling
* {@link #cleanup()} will delete the underlying directory resource.
*/
public abstract | provides |
java | elastic__elasticsearch | modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/FieldCapsRankFeatureTests.java | {
"start": 1154,
"end": 5044
} | class ____ extends ESIntegTestCase {
private final String INDEX = "index-1";
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
var plugins = new ArrayList<>(super.nodePlugins());
plugins.add(MapperExtrasPlugin.class);
return plugins;
}
@Before
public void setUpIndices() {
assertAcked(
prepareCreate(INDEX).setWaitForActiveShards(ActiveShardCount.ALL)
.setSettings(indexSettings())
.setMapping("fooRank", "type=rank_feature", "barRank", "type=rank_feature")
);
}
public void testRankFeatureInIndex() {
FieldCapabilitiesResponse response = client().prepareFieldCaps(INDEX).setFields("*").setincludeEmptyFields(false).get();
assertFalse(response.get().containsKey("fooRank"));
assertFalse(response.get().containsKey("barRank"));
prepareIndex(INDEX).setSource("fooRank", 8).setSource("barRank", 8).get();
refresh(INDEX);
response = client().prepareFieldCaps(INDEX).setFields("*").setincludeEmptyFields(false).get();
assertEquals(1, response.getIndices().length);
assertEquals(response.getIndices()[0], INDEX);
assertThat(response.get(), Matchers.hasKey("fooRank"));
// Check the capabilities for the 'fooRank' field.
Map<String, FieldCapabilities> fooRankField = response.getField("fooRank");
assertEquals(1, fooRankField.size());
assertThat(fooRankField, Matchers.hasKey("rank_feature"));
assertEquals(fieldCapabilities("fooRank"), fooRankField.get("rank_feature"));
}
public void testRankFeatureInIndexAfterRestart() throws Exception {
prepareIndex(INDEX).setSource("fooRank", 8).get();
internalCluster().fullRestart();
ensureGreen(INDEX);
FieldCapabilitiesResponse response = client().prepareFieldCaps(INDEX).setFields("*").setincludeEmptyFields(false).get();
assertEquals(1, response.getIndices().length);
assertEquals(response.getIndices()[0], INDEX);
assertThat(response.get(), Matchers.hasKey("fooRank"));
// Check the capabilities for the 'fooRank' field.
Map<String, FieldCapabilities> fooRankField = response.getField("fooRank");
assertEquals(1, fooRankField.size());
assertThat(fooRankField, Matchers.hasKey("rank_feature"));
assertEquals(fieldCapabilities("fooRank"), fooRankField.get("rank_feature"));
}
public void testAllRankFeatureReturnedIfOneIsPresent() {
prepareIndex(INDEX).setSource("fooRank", 8).get();
refresh(INDEX);
FieldCapabilitiesResponse response = client().prepareFieldCaps(INDEX).setFields("*").setincludeEmptyFields(false).get();
assertEquals(1, response.getIndices().length);
assertEquals(response.getIndices()[0], INDEX);
assertThat(response.get(), Matchers.hasKey("fooRank"));
// Check the capabilities for the 'fooRank' field.
Map<String, FieldCapabilities> fooRankField = response.getField("fooRank");
assertEquals(1, fooRankField.size());
assertThat(fooRankField, Matchers.hasKey("rank_feature"));
assertEquals(fieldCapabilities("fooRank"), fooRankField.get("rank_feature"));
assertThat(response.get(), Matchers.hasKey("barRank"));
// Check the capabilities for the 'barRank' field.
Map<String, FieldCapabilities> barRankField = response.getField("barRank");
assertEquals(1, barRankField.size());
assertThat(barRankField, Matchers.hasKey("rank_feature"));
assertEquals(fieldCapabilities("barRank"), barRankField.get("rank_feature"));
}
private static FieldCapabilities fieldCapabilities(String fieldName) {
return new FieldCapabilitiesBuilder(fieldName, "rank_feature").isAggregatable(false).build();
}
}
| FieldCapsRankFeatureTests |
java | elastic__elasticsearch | x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/support/HistogramValuesSource.java | {
"start": 1396,
"end": 2512
} | class ____ extends Histogram {
protected final IndexHistogramFieldData indexFieldData;
public Fielddata(IndexHistogramFieldData indexFieldData) {
this.indexFieldData = indexFieldData;
}
@Override
public SortedBinaryDocValues bytesValues(LeafReaderContext context) {
return indexFieldData.load(context).getBytesValues();
}
@Override
public DocValueBits docsWithValue(LeafReaderContext context) throws IOException {
HistogramValues values = getHistogramValues(context);
return new DocValueBits() {
@Override
public boolean advanceExact(int doc) throws IOException {
return values.advanceExact(doc);
}
};
}
@Override
public HistogramValues getHistogramValues(LeafReaderContext context) throws IOException {
return indexFieldData.load(context).getHistogramValues();
}
}
}
}
| Fielddata |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/cookie/ClientCookieEncoder.java | {
"start": 1229,
"end": 2245
} | interface ____ {
/**
* The default {@link ServerCookieEncoder} instance.
*/
ClientCookieEncoder INSTANCE = SoftServiceLoader
.load(ClientCookieEncoder.class)
.firstOr("io.micronaut.http.cookie.DefaultClientCookieEncoder", ClientCookieEncoder.class.getClassLoader())
.map(ServiceDefinition::load)
.orElse(null);
/**
* Encodes a {@link Cookie} into a String. Typically used to set the {@link io.micronaut.http.HttpHeaders#COOKIE} value for example in an HTTP Client.
* The cookie gets serialized by concatenating the cookie's name, the %x3D ("=") character, and the cookie's value.
* @see <a href="https://datatracker.ietf.org/doc/html/rfc6265#section-5.4">Cookie Header</a>.
* @param cookie Cookie to encode
* @return The cookie serialized into a string by concatenating the cookie's name, the %x3D ("=") character, and the cookie's value.
*/
@NonNull
String encode(@NonNull Cookie cookie);
}
| ClientCookieEncoder |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ResourceCalculatorUtils.java | {
"start": 1021,
"end": 2361
} | class ____ {
public static int divideAndCeil(long a, long b) {
if (b == 0) {
return 0;
}
return (int) ((a + (b - 1)) / b);
}
public static int computeAvailableContainers(Resource available,
Resource required, EnumSet<SchedulerResourceTypes> resourceTypes) {
if (resourceTypes.contains(SchedulerResourceTypes.CPU)) {
return Math.min(
calculateRatioOrMaxValue(available.getMemorySize(), required.getMemorySize()),
calculateRatioOrMaxValue(available.getVirtualCores(), required
.getVirtualCores()));
}
return calculateRatioOrMaxValue(
available.getMemorySize(), required.getMemorySize());
}
public static int divideAndCeilContainers(Resource required, Resource factor,
EnumSet<SchedulerResourceTypes> resourceTypes) {
if (resourceTypes.contains(SchedulerResourceTypes.CPU)) {
return Math.max(divideAndCeil(required.getMemorySize(), factor.getMemorySize()),
divideAndCeil(required.getVirtualCores(), factor.getVirtualCores()));
}
return divideAndCeil(required.getMemorySize(), factor.getMemorySize());
}
private static int calculateRatioOrMaxValue(long numerator, long denominator) {
if (denominator == 0) {
return Integer.MAX_VALUE;
}
return (int) (numerator / denominator);
}
}
| ResourceCalculatorUtils |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/api/AssertAllAssertionsTests.java | {
"start": 6269,
"end": 6317
} | class ____ extends Throwable {
}
}
| EnigmaThrowable |
java | quarkusio__quarkus | integration-tests/maven/src/test/java/io/quarkus/maven/it/AddExtensionIT.java | {
"start": 925,
"end": 6937
} | class ____ extends QuarkusPlatformAwareMojoTestBase {
private static final String QUARKUS_GROUPID = "io.quarkus";
private static final String BOM_ARTIFACT_ID = "quarkus-bom";
private static final String VERTX_ARTIFACT_ID = "quarkus-vertx";
private static final String COMMONS_CODEC = "commons-codec";
private static final String PROJECT_SOURCE_DIR = "projects/classic";
private File testDir;
private Invoker invoker;
@Test
void testAddExtensionWithASingleExtension() throws MavenInvocationException, IOException {
testDir = initProject(PROJECT_SOURCE_DIR, "projects/testAddExtensionWithASingleExtension");
invoker = initInvoker(testDir);
addExtension(false, VERTX_ARTIFACT_ID);
Model model = loadPom(testDir);
Dependency expected = new Dependency();
expected.setGroupId(QUARKUS_GROUPID);
expected.setArtifactId(VERTX_ARTIFACT_ID);
assertThat(contains(model.getDependencies(), expected)).isTrue();
}
@Test
void testAddExtensionWithASingleExtensionToSubmodule() throws MavenInvocationException, IOException {
testDir = initProject("projects/multimodule", "projects/testAddExtensionWithASingleExtensionToSubmodule");
testDir = new File(testDir, "runner");
invoker = initInvoker(testDir);
addExtension(false, VERTX_ARTIFACT_ID);
Model model = loadPom(testDir);
Dependency expected = new Dependency();
expected.setGroupId(QUARKUS_GROUPID);
expected.setArtifactId(VERTX_ARTIFACT_ID);
assertThat(contains(model.getDependencies(), expected)).isTrue();
assertThat(Optional.ofNullable(model.getDependencyManagement())
.map(DependencyManagement::getDependencies).orElse(Collections.emptyList())).isEmpty();
}
@Test
void testAddExtensionWithMultipleExtension() throws MavenInvocationException, IOException {
testDir = initProject(PROJECT_SOURCE_DIR, "projects/testAddExtensionWithMultipleExtension");
invoker = initInvoker(testDir);
addExtension(false, "quarkus-vertx, commons-codec:commons-codec:1.15");
Model model = loadPom(testDir);
Dependency expected1 = new Dependency();
expected1.setGroupId(QUARKUS_GROUPID);
expected1.setArtifactId(VERTX_ARTIFACT_ID);
Dependency expected2 = new Dependency();
expected2.setGroupId(COMMONS_CODEC);
expected2.setArtifactId(COMMONS_CODEC);
expected2.setVersion("1.15");
assertThat(contains(model.getDependencies(), expected1)).isTrue();
assertThat(contains(model.getDependencies(), expected2)).isTrue();
}
@Test
void testAddExtensionWithASingleExtensionWithPluralForm() throws MavenInvocationException, IOException {
testDir = initProject(PROJECT_SOURCE_DIR,
"projects/testAddExtensionWithASingleExtensionWithPluralForm");
invoker = initInvoker(testDir);
addExtension(true, VERTX_ARTIFACT_ID);
Model model = loadPom(testDir);
Dependency expected = new Dependency();
expected.setGroupId(QUARKUS_GROUPID);
expected.setArtifactId(VERTX_ARTIFACT_ID);
assertThat(contains(model.getDependencies(), expected)).isTrue();
}
@Test
void testAddExtensionWithMultipleExtensionsAndPluralForm() throws MavenInvocationException, IOException {
testDir = initProject(PROJECT_SOURCE_DIR,
"projects/testAddExtensionWithMultipleExtensionAndPluralForm");
invoker = initInvoker(testDir);
addExtension(true, "quarkus-vertx, commons-codec:commons-codec:1.15");
Model model = loadPom(testDir);
Dependency expected1 = new Dependency();
expected1.setGroupId(QUARKUS_GROUPID);
expected1.setArtifactId(VERTX_ARTIFACT_ID);
Dependency expected2 = new Dependency();
expected2.setGroupId(COMMONS_CODEC);
expected2.setArtifactId(COMMONS_CODEC);
expected2.setVersion("1.15");
assertThat(contains(model.getDependencies(), expected1)).isTrue();
assertThat(contains(model.getDependencies(), expected2)).isTrue();
}
private boolean contains(List<Dependency> dependencies, Dependency expected) {
return dependencies.stream().anyMatch(dep -> dep.getGroupId().equals(expected.getGroupId())
&& dep.getArtifactId().equals(expected.getArtifactId())
&& (expected.getVersion() == null ? dep.getVersion() == null : expected.getVersion().equals(dep.getVersion()))
&& (dep.getScope() == null || dep.getScope().equals(expected.getScope()))
&& dep.isOptional() == expected.isOptional()
&& dep.getType().equals(expected.getType()));
}
private void addExtension(boolean plural, String ext)
throws MavenInvocationException, FileNotFoundException, UnsupportedEncodingException {
InvocationRequest request = new DefaultInvocationRequest();
request.setBatchMode(true);
request.setGoals(Collections.singletonList(
getMavenPluginGroupId() + ":" + getMavenPluginArtifactId() + ":" + getMavenPluginVersion() + ":add-extension"));
Properties properties = new Properties();
properties.setProperty("platformGroupId", QUARKUS_GROUPID);
properties.setProperty("platformArtifactId", BOM_ARTIFACT_ID);
properties.setProperty("platformVersion", getQuarkusCoreVersion());
if (plural) {
properties.setProperty("extensions", ext);
} else {
properties.setProperty("extension", ext);
}
request.setProperties(properties);
File log = new File(testDir, "build-add-extension-" + testDir.getName() + ".log");
PrintStreamLogger logger = new PrintStreamLogger(new PrintStream(new FileOutputStream(log), false, "UTF-8"),
InvokerLogger.DEBUG);
invoker.setLogger(logger);
invoker.execute(request);
}
}
| AddExtensionIT |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/data/ParcelFileDescriptorRewinder.java | {
"start": 445,
"end": 1420
} | class ____ implements DataRewinder<ParcelFileDescriptor> {
private final InternalRewinder rewinder;
public static boolean isSupported() {
// Os.lseek() is only supported on API 21+ and does not work in Robolectric.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP
&& !"robolectric".equals(Build.FINGERPRINT);
}
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
public ParcelFileDescriptorRewinder(ParcelFileDescriptor parcelFileDescriptor) {
rewinder = new InternalRewinder(parcelFileDescriptor);
}
@NonNull
@Override
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
public ParcelFileDescriptor rewindAndGet() throws IOException {
return rewinder.rewind();
}
@Override
public void cleanup() {
// Do nothing.
}
/**
* Factory for producing {@link ParcelFileDescriptorRewinder}s from {@link ParcelFileDescriptor}s.
*/
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
public static final | ParcelFileDescriptorRewinder |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/calendar/RestGetCalendarEventsAction.java | {
"start": 1182,
"end": 3019
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(GET, BASE_PATH + "calendars/{" + ID + "}/events"));
}
@Override
public String getName() {
return "ml_get_calendar_events_action";
}
@Override
protected BaseRestHandler.RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
String calendarId = restRequest.param(Calendar.ID.getPreferredName());
GetCalendarEventsAction.Request request;
if (restRequest.hasContentOrSourceParam()) {
try (XContentParser parser = restRequest.contentOrSourceParamParser()) {
request = GetCalendarEventsAction.Request.parseRequest(calendarId, parser);
}
} else {
request = new GetCalendarEventsAction.Request(calendarId);
request.setStart(restRequest.param(GetCalendarEventsAction.Request.START.getPreferredName(), null));
request.setEnd(restRequest.param(GetCalendarEventsAction.Request.END.getPreferredName(), null));
request.setJobId(restRequest.param(Job.ID.getPreferredName(), null));
if (restRequest.hasParam(PageParams.FROM.getPreferredName()) || restRequest.hasParam(PageParams.SIZE.getPreferredName())) {
request.setPageParams(
new PageParams(
restRequest.paramAsInt(PageParams.FROM.getPreferredName(), PageParams.DEFAULT_FROM),
restRequest.paramAsInt(PageParams.SIZE.getPreferredName(), PageParams.DEFAULT_SIZE)
)
);
}
}
return channel -> client.execute(GetCalendarEventsAction.INSTANCE, request, new RestToXContentListener<>(channel));
}
}
| RestGetCalendarEventsAction |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/inject/CallableInjectionPoint.java | {
"start": 857,
"end": 1025
} | interface ____<T> extends InjectionPoint<T> {
/**
*
* @return The required argument types.
*/
Argument<?>[] getArguments();
}
| CallableInjectionPoint |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/core/AuthenticationException.java | {
"start": 950,
"end": 2585
} | class ____ extends RuntimeException {
@Serial
private static final long serialVersionUID = 2018827803361503060L;
private @Nullable Authentication authenticationRequest;
/**
* Constructs an {@code AuthenticationException} with the specified message and root
* cause.
* @param msg the detail message
* @param cause the root cause
*/
public AuthenticationException(@Nullable String msg, Throwable cause) {
super(msg, cause);
}
/**
* Constructs an {@code AuthenticationException} with the specified message and no
* root cause.
* @param msg the detail message
*/
public AuthenticationException(@Nullable String msg) {
super(msg);
}
/**
* Get the {@link Authentication} object representing the failed authentication
* attempt.
* <p>
* This field captures the authentication request that was attempted but ultimately
* failed, providing critical information for diagnosing the failure and facilitating
* debugging
* @since 6.5
*/
public @Nullable Authentication getAuthenticationRequest() {
return this.authenticationRequest;
}
/**
* Set the {@link Authentication} object representing the failed authentication
* attempt.
* <p>
* The provided {@code authenticationRequest} should not be null
* @param authenticationRequest the authentication request associated with the failed
* authentication attempt
* @since 6.5
*/
public void setAuthenticationRequest(@Nullable Authentication authenticationRequest) {
Assert.notNull(authenticationRequest, "authenticationRequest cannot be null");
this.authenticationRequest = authenticationRequest;
}
}
| AuthenticationException |
java | apache__camel | components/camel-kafka/src/main/java/org/apache/camel/component/kafka/consumer/support/interop/JMSDeserializer.java | {
"start": 980,
"end": 2798
} | class ____ implements KafkaHeaderDeserializer {
public boolean isLong(byte[] bytes) {
return bytes.length == Long.BYTES;
}
private static long bytesToLong(byte[] bytes) {
final ByteBuffer buffer = toByteBuffer(bytes, Long.BYTES);
return buffer.getLong();
}
private static int bytesToInt(byte[] bytes) {
final ByteBuffer buffer = toByteBuffer(bytes, Integer.BYTES);
return buffer.getInt();
}
private static ByteBuffer toByteBuffer(byte[] bytes, int size) {
ByteBuffer buffer = ByteBuffer.allocate(size);
buffer.put(bytes);
buffer.flip();
return buffer;
}
@Override
public Object deserialize(String key, byte[] value) {
if (key.startsWith("JMS")) {
switch (key) {
case "JMSDestination":
return new String(value);
case "JMSDeliveryMode":
return bytesToInt(value);
case "JMSTimestamp":
return bytesToLong(value);
case "JMSCorrelationID":
return new String(value);
case "JMSReplyTo":
return new String(value);
case "JMSRedelivered":
return Boolean.parseBoolean(new String(value));
case "JMSType":
return new String(value);
case "JMSExpiration":
return isLong(value) ? bytesToLong(value) : bytesToInt(value);
case "JMSPriority":
return bytesToInt(value);
case "JMSMessageID":
return new String(value);
default:
return value;
}
}
return value;
}
}
| JMSDeserializer |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/bytearray/ByteArrayAssert_isEmpty_Test.java | {
"start": 929,
"end": 1357
} | class ____ extends ByteArrayAssertBaseTest {
@Override
protected ByteArrayAssert invoke_api_method() {
assertions.isEmpty();
return null;
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertEmpty(getInfo(assertions), getActual(assertions));
}
@Override
@Test
public void should_return_this() {
// Disable this test since isEmpty is void
}
}
| ByteArrayAssert_isEmpty_Test |
java | spring-projects__spring-boot | core/spring-boot-test-autoconfigure/src/main/java/org/springframework/boot/test/autoconfigure/TestSliceTestContextBootstrapper.java | {
"start": 1278,
"end": 1462
} | class ____ test slice {@link TestContextBootstrapper test context bootstrappers}.
*
* @param <T> the test slice annotation
* @author Yanming Zhou
* @since 4.0.0
*/
public abstract | for |
java | spring-projects__spring-security | acl/src/test/java/org/springframework/security/acls/jdbc/BasicLookupStrategyTestsDbHelper.java | {
"start": 958,
"end": 1084
} | class ____ initialize the database for BasicLookupStrategyTests.
*
* @author Andrei Stefan
* @author Paul Wheeler
*/
public | to |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/DialectChecks.java | {
"start": 1404,
"end": 1599
} | class ____ implements DialectCheck {
public boolean isMatch(Dialect dialect) {
return dialect.getIdentityColumnSupport().supportsIdentityColumns();
}
}
public static | SupportsIdentityColumns |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/SpringCamelContextShutdownAfterBeanTest.java | {
"start": 1144,
"end": 2543
} | class ____ extends SpringTestSupport {
@Override
protected AbstractXmlApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext("org/apache/camel/spring/SpringCamelContextShutdownAfterBeanTest.xml");
}
@Test
public void testShutdown() throws Exception {
// you may have errors during shutdown, which you can see from the log
ShutdownOrderBean order = (ShutdownOrderBean) context.getRegistry().lookupByName("order");
assertEquals(3, order.getStart().size());
assertEquals(0, order.getShutdown().size());
assertEquals("a", order.getStart().get(0));
assertEquals("b", order.getStart().get(1));
assertEquals("c", order.getStart().get(2));
MockEndpoint first = getMockEndpoint("mock:first");
first.expectedMessageCount(5);
for (int i = 0; i < 5; i++) {
template.sendBody("seda:start", "Hello World");
}
first.assertIsSatisfied();
// stop spring to cause shutdown of Camel
applicationContext.close();
assertEquals(3, order.getStart().size());
assertEquals(3, order.getShutdown().size());
assertEquals("c", order.getShutdown().get(0));
assertEquals("b", order.getShutdown().get(1));
assertEquals("a", order.getShutdown().get(2));
}
}
| SpringCamelContextShutdownAfterBeanTest |
java | apache__camel | components/camel-snmp/src/main/java/org/apache/camel/component/snmp/SnmpEndpoint.java | {
"start": 1706,
"end": 13164
} | class ____ extends DefaultPollingEndpoint implements EndpointServiceLocation {
public static final String DEFAULT_COMMUNITY = "public";
public static final int DEFAULT_SNMP_VERSION = SnmpConstants.version1;
public static final int DEFAULT_SNMP_RETRIES = 2;
public static final int DEFAULT_SNMP_TIMEOUT = 1500;
private static final Logger LOG = LoggerFactory.getLogger(SnmpEndpoint.class);
private transient String serverAddress;
@UriPath(description = "Hostname of the SNMP enabled device")
@Metadata(required = true)
private String host;
@UriPath(description = "Port number of the SNMP enabled device")
@Metadata(required = true)
private Integer port;
@UriParam(defaultValue = "udp", enums = "tcp,udp")
private String protocol = "udp";
@UriParam(defaultValue = "" + DEFAULT_SNMP_RETRIES)
private int retries = DEFAULT_SNMP_RETRIES;
@UriParam(defaultValue = "" + DEFAULT_SNMP_TIMEOUT)
private int timeout = DEFAULT_SNMP_TIMEOUT;
@UriParam(defaultValue = "" + DEFAULT_SNMP_VERSION, enums = "0,1,3")
private int snmpVersion = DEFAULT_SNMP_VERSION;
@UriParam(defaultValue = DEFAULT_COMMUNITY)
private String snmpCommunity = DEFAULT_COMMUNITY;
@UriParam
private SnmpActionType type;
@UriParam(label = "consumer", defaultValue = "60000", javaType = "java.time.Duration",
description = "Milliseconds before the next poll.")
private long delay = 60000;
@UriParam(defaultValue = "" + SecurityLevel.AUTH_PRIV, enums = "1,2,3", label = "security")
private int securityLevel = SecurityLevel.AUTH_PRIV;
@UriParam(label = "security", secret = true)
private String securityName;
@UriParam(enums = "MD5,SHA1", label = "security")
private String authenticationProtocol;
@UriParam(label = "security", secret = true)
private String authenticationPassphrase;
@UriParam(label = "security", secret = true)
private String privacyProtocol;
@UriParam(label = "security", secret = true)
private String privacyPassphrase;
@UriParam
private String snmpContextName;
@UriParam
private String snmpContextEngineId;
@UriParam
private OIDList oids = new OIDList();
@UriParam(label = "consumer", defaultValue = "false")
private boolean treeList;
/**
* creates a snmp endpoint
*
* @param uri the endpoint uri
* @param component the component
*/
public SnmpEndpoint(String uri, SnmpComponent component) {
super(uri, component);
super.setDelay(60000);
}
@Override
public String getServiceUrl() {
if (port != null) {
return host + ":" + port;
} else {
return host;
}
}
@Override
public String getServiceProtocol() {
return "snmp";
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
if (this.type == SnmpActionType.TRAP) {
// As the SnmpTrapConsumer is not a polling consumer we don't need to call the configureConsumer here.
return new SnmpTrapConsumer(this, processor);
} else if (this.type == SnmpActionType.POLL) {
SnmpOIDPoller answer = new SnmpOIDPoller(this, processor);
configureConsumer(answer);
return answer;
} else {
throw new IllegalArgumentException("The type '" + this.type + "' is not valid!");
}
}
@Override
public Producer createProducer() throws Exception {
if (this.type == SnmpActionType.TRAP) {
return new SnmpTrapProducer(this);
} else {
// add the support: snmp walk (use snmp4j GET_NEXT)
return new SnmpProducer(this, this.type);
}
}
/**
* creates an exchange for the given message
*
* @param pdu the pdu
* @return an exchange
*/
public Exchange createExchange(PDU pdu) {
Exchange exchange = super.createExchange();
exchange.setIn(new SnmpMessage(getCamelContext(), pdu));
return exchange;
}
/**
* creates and configures the endpoint
*
* @throws Exception if unable to setup connection
* @deprecated use {@link #start()} instead
*/
@Deprecated
public void initiate() throws Exception {
// noop
}
public SnmpActionType getType() {
return this.type;
}
/**
* Which operation to perform such as poll, trap, etc.
*/
public void setType(SnmpActionType type) {
this.type = type;
}
public OIDList getOids() {
return this.oids;
}
/**
* Defines which values you are interested in. Please have a look at the Wikipedia to get a better understanding.
* You may provide a single OID or a coma separated list of OIDs. Example:
* oids="1.3.6.1.2.1.1.3.0,1.3.6.1.2.1.25.3.2.1.5.1,1.3.6.1.2.1.25.3.5.1.1.1,1.3.6.1.2.1.43.5.1.1.11.1"
*/
public void setOids(OIDList oids) {
this.oids = oids;
}
public String getServerAddress() {
return this.serverAddress;
}
public void setServerAddress(String serverAddress) {
this.serverAddress = serverAddress;
}
public int getRetries() {
return this.retries;
}
/**
* Defines how often a retry is made before canceling the request.
*/
public void setRetries(int retries) {
this.retries = retries;
}
public int getTimeout() {
return this.timeout;
}
/**
* Sets the timeout value for the request in millis.
*/
public void setTimeout(int timeout) {
this.timeout = timeout;
}
public int getSnmpVersion() {
return this.snmpVersion;
}
/**
* Sets the snmp version for the request.
* <p/>
* The value 0 means SNMPv1, 1 means SNMPv2c, and the value 3 means SNMPv3
*/
public void setSnmpVersion(int snmpVersion) {
this.snmpVersion = snmpVersion;
}
public String getSnmpCommunity() {
return this.snmpCommunity;
}
/**
* Sets the community octet string for the snmp request.
*/
public void setSnmpCommunity(String snmpCommunity) {
this.snmpCommunity = snmpCommunity;
}
public String getProtocol() {
return this.protocol;
}
/**
* Here you can select which protocol to use. You can use either udp or tcp.
*/
public void setProtocol(String protocol) {
this.protocol = protocol;
}
@Override
protected void doInit() throws Exception {
super.doInit();
URI uri = URI.create(getEndpointUri());
String host = uri.getHost();
int port = uri.getPort();
if (host == null || host.isBlank()) {
host = "127.0.0.1";
}
if (port == -1) {
if (getType() == SnmpActionType.POLL) {
port = 161; // default snmp poll port
} else {
port = 162; // default trap port
}
}
// set the address
String address = String.format("%s:%s/%d", getProtocol(), host, port);
LOG.debug("Using snmp address {}", address);
setServerAddress(address);
}
public int getSecurityLevel() {
return securityLevel;
}
/**
* Sets the security level for this target. The supplied security level must be supported by the security model
* dependent information associated with the security name set for this target.
* <p/>
* The value 1 means: No authentication and no encryption. Anyone can create and read messages with this security
* level The value 2 means: Authentication and no encryption. Only the one with the right authentication key can
* create messages with this security level, but anyone can read the contents of the message. The value 3 means:
* Authentication and encryption. Only the one with the right authentication key can create messages with this
* security level, and only the one with the right encryption/decryption key can read the contents of the message.
*/
public void setSecurityLevel(int securityLevel) {
this.securityLevel = securityLevel;
}
public String getSecurityName() {
return securityName;
}
/**
* Sets the security name to be used with this target.
*/
public void setSecurityName(String securityName) {
this.securityName = securityName;
}
public String getAuthenticationProtocol() {
return authenticationProtocol;
}
/**
* Authentication protocol to use if security level is set to enable authentication The possible values are: MD5,
* SHA1
*/
public void setAuthenticationProtocol(String authenticationProtocol) {
this.authenticationProtocol = authenticationProtocol;
}
public String getAuthenticationPassphrase() {
return authenticationPassphrase;
}
/**
* The authentication passphrase. If not <code>null</code>, <code>authenticationProtocol</code> must also be not
* <code>null</code>. RFC3414 11.2 requires passphrases to have a minimum length of 8 bytes. If the length of
* <code>authenticationPassphrase</code> is less than 8 bytes an <code>IllegalArgumentException</code> is thrown.
*/
public void setAuthenticationPassphrase(String authenticationPassphrase) {
this.authenticationPassphrase = authenticationPassphrase;
}
public String getPrivacyProtocol() {
return privacyProtocol;
}
/**
* The privacy protocol ID to be associated with this user. If set to <code>null</code>, this user only supports
* unencrypted messages.
*/
public void setPrivacyProtocol(String privacyProtocol) {
this.privacyProtocol = privacyProtocol;
}
public String getPrivacyPassphrase() {
return privacyPassphrase;
}
/**
* The privacy passphrase. If not <code>null</code>, <code>privacyProtocol</code> must also be not
* <code>null</code>. RFC3414 11.2 requires passphrases to have a minimum length of 8 bytes. If the length of
* <code>authenticationPassphrase</code> is less than 8 bytes an <code>IllegalArgumentException</code> is thrown.
*/
public void setPrivacyPassphrase(String privacyPassphrase) {
this.privacyPassphrase = privacyPassphrase;
}
public String getSnmpContextName() {
return snmpContextName;
}
/**
* Sets the context name field of this scoped PDU.
*/
public void setSnmpContextName(String snmpContextName) {
this.snmpContextName = snmpContextName;
}
public String getSnmpContextEngineId() {
return snmpContextEngineId;
}
/**
* Sets the context engine ID field of the scoped PDU.
*/
public void setSnmpContextEngineId(String snmpContextEngineId) {
this.snmpContextEngineId = snmpContextEngineId;
}
public boolean isTreeList() {
return treeList;
}
/**
* Sets the flag whether the scoped PDU will be displayed as the list if it has child elements in its tree
*/
public void setTreeList(boolean treeList) {
this.treeList = treeList;
}
@Override
public String toString() {
// only show address to avoid user and password details to be shown
return "snmp://" + serverAddress;
}
}
| SnmpEndpoint |
java | redisson__redisson | redisson/src/main/java/org/redisson/reactive/RedissonReliableTopicReactive.java | {
"start": 901,
"end": 2070
} | class ____ {
private final RReliableTopic topic;
public RedissonReliableTopicReactive(RReliableTopic topic) {
this.topic = topic;
}
public <M> Flux<M> getMessages(Class<M> type) {
return Flux.create(emitter -> {
emitter.onRequest(n -> {
AtomicLong counter = new AtomicLong(n);
AtomicReference<String> idRef = new AtomicReference<>();
RFuture<String> t = topic.addListenerAsync(type, (channel, msg) -> {
emitter.next(msg);
if (counter.decrementAndGet() == 0) {
topic.removeListenerAsync(idRef.get());
emitter.complete();
}
});
t.whenComplete((id, e) -> {
if (e != null) {
emitter.error(e);
return;
}
idRef.set(id);
emitter.onDispose(() -> {
topic.removeListenerAsync(id);
});
});
});
});
}
}
| RedissonReliableTopicReactive |
java | elastic__elasticsearch | x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java | {
"start": 1069,
"end": 2042
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(POST, BASE_PATH + "find_structure"));
}
@Override
public String getName() {
return "text_structure_find_structure_action";
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) {
FindStructureAction.Request request = new FindStructureAction.Request();
RestFindStructureArgumentsParser.parse(restRequest, request);
var content = restRequest.requiredContent();
request.setSample(content);
return channel -> client.execute(
FindStructureAction.INSTANCE,
request,
ActionListener.withRef(new RestToXContentListener<>(channel), content)
);
}
@Override
protected Set<String> responseParams() {
return Collections.singleton(TextStructure.EXPLAIN);
}
}
| RestFindStructureAction |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java | {
"start": 5050,
"end": 6559
} | class ____ extends FilterInitializer {
public Initializer() {
}
@Override
public void initFilter(FilterContainer container, Configuration conf) {
container.addFilter("simple", ErrorFilter.class.getName(), null);
}
}
}
@Test
public void testServletFilterWhenInitThrowsException() throws Exception {
Configuration conf = new Configuration();
// start a http server with ErrorFilter
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
ErrorFilter.Initializer.class.getName());
HttpServer2 http = createTestServer(conf);
try {
http.start();
fail("expecting exception");
} catch (IOException e) {
assertEquals("Problem starting http server", e.getMessage());
assertEquals(ErrorFilter.EXCEPTION_MESSAGE, e.getCause().getMessage());
}
}
/**
* Similar to the above test case, except that it uses a different API to add the
* filter. Regression test for HADOOP-8786.
*/
@Test
public void testContextSpecificServletFilterWhenInitThrowsException()
throws Exception {
Configuration conf = new Configuration();
HttpServer2 http = createTestServer(conf);
HttpServer2.defineFilter(http.webAppContext,
"ErrorFilter", ErrorFilter.class.getName(),
null, null);
try {
http.start();
fail("expecting exception");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Unable to initialize WebAppContext", e);
}
}
}
| Initializer |
java | google__dagger | hilt-compiler/main/java/dagger/hilt/processor/internal/definecomponent/DefineComponentMetadatas.java | {
"start": 7917,
"end": 8376
} | class ____ {
/** Returns the component annotated with {@link dagger.hilt.DefineComponent}. */
abstract XTypeElement component();
/** Returns the scopes of the component. */
abstract ImmutableList<XTypeElement> scopes();
/** Returns the parent component, if one exists. */
abstract Optional<DefineComponentMetadata> parentMetadata();
boolean isRoot() {
return !parentMetadata().isPresent();
}
}
}
| DefineComponentMetadata |
java | apache__camel | tooling/maven/camel-package-maven-plugin/src/test/java/org/apache/camel/maven/packaging/endpoint/SomeCommonConstants.java | {
"start": 901,
"end": 1059
} | class ____ {
@Metadata
public static final String KEY_FROM_COMMON = "KEY_FROM_COMMON";
protected SomeCommonConstants() {
}
}
| SomeCommonConstants |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/core/token/Token.java | {
"start": 1291,
"end": 2283
} | interface ____ {
/**
* Obtains the randomised, secure key assigned to this token. Presentation of this
* token to {@link TokenService} will always return a <code>Token</code> that is equal
* to the original <code>Token</code> issued for that key.
* @return a key with appropriate randomness and security.
*/
String getKey();
/**
* The time the token key was initially created is available from this method. Note
* that a given token must never have this creation time changed. If necessary, a new
* token can be requested from the {@link TokenService} to replace the original token.
* @return the time this token key was created, in the same format as specified by
* {@link java.util.Date#getTime()}.
*/
long getKeyCreationTime();
/**
* Obtains the extended information associated within the token, which was presented
* when the token was first created.
* @return the user-specified extended information, if any
*/
String getExtendedInformation();
}
| Token |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java | {
"start": 1454,
"end": 8890
} | class ____ implements Compressor {
private static final Logger LOG =
LoggerFactory.getLogger(ZStandardCompressor.class);
private long stream;
private int level;
private int directBufferSize;
private byte[] userBuf = null;
private int userBufOff = 0, userBufLen = 0;
private ByteBuffer uncompressedDirectBuf = null;
private int uncompressedDirectBufOff = 0, uncompressedDirectBufLen = 0;
private boolean keepUncompressedBuf = false;
private ByteBuffer compressedDirectBuf = null;
private boolean finish, finished;
private long bytesRead = 0;
private long bytesWritten = 0;
private static boolean nativeZStandardLoaded = false;
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
try {
// Initialize the native library
initIDs();
nativeZStandardLoaded = true;
} catch (Throwable t) {
LOG.warn("Error loading zstandard native libraries: " + t);
}
}
}
public static boolean isNativeCodeLoaded() {
return nativeZStandardLoaded;
}
public static int getRecommendedBufferSize() {
return getStreamSize();
}
@VisibleForTesting
ZStandardCompressor() {
this(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT,
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
}
/**
* Creates a new compressor with the default compression level.
* Compressed data will be generated in ZStandard format.
* @param level level.
* @param bufferSize bufferSize.
*/
public ZStandardCompressor(int level, int bufferSize) {
this(level, bufferSize, bufferSize);
}
@VisibleForTesting
ZStandardCompressor(int level, int inputBufferSize, int outputBufferSize) {
this.level = level;
stream = create();
this.directBufferSize = outputBufferSize;
uncompressedDirectBuf = ByteBuffer.allocateDirect(inputBufferSize);
compressedDirectBuf = ByteBuffer.allocateDirect(outputBufferSize);
compressedDirectBuf.position(outputBufferSize);
reset();
}
/**
* Prepare the compressor to be used in a new stream with settings defined in
* the given Configuration. It will reset the compressor's compression level
* and compression strategy.
*
* @param conf Configuration storing new settings
*/
@Override
public void reinit(Configuration conf) {
if (conf == null) {
return;
}
level = ZStandardCodec.getCompressionLevel(conf);
reset();
LOG.debug("Reinit compressor with new compression configuration");
}
@Override
public void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
this.userBuf = b;
this.userBufOff = off;
this.userBufLen = len;
uncompressedDirectBufOff = 0;
setInputFromSavedData();
compressedDirectBuf.limit(directBufferSize);
compressedDirectBuf.position(directBufferSize);
}
//copy enough data from userBuf to uncompressedDirectBuf
private void setInputFromSavedData() {
int len = Math.min(userBufLen, uncompressedDirectBuf.remaining());
uncompressedDirectBuf.put(userBuf, userBufOff, len);
userBufLen -= len;
userBufOff += len;
uncompressedDirectBufLen = uncompressedDirectBuf.position();
}
@Override
public void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException(
"Dictionary support is not enabled");
}
@Override
public boolean needsInput() {
// Consume remaining compressed data?
if (compressedDirectBuf.remaining() > 0) {
return false;
}
// have we consumed all input
if (keepUncompressedBuf && uncompressedDirectBufLen - uncompressedDirectBufOff > 0) {
return false;
}
if (uncompressedDirectBuf.remaining() > 0) {
// Check if we have consumed all user-input
if (userBufLen <= 0) {
return true;
} else {
// copy enough data from userBuf to uncompressedDirectBuf
setInputFromSavedData();
// uncompressedDirectBuf is not full
return uncompressedDirectBuf.remaining() > 0;
}
}
return false;
}
@Override
public void finish() {
finish = true;
}
@Override
public boolean finished() {
// Check if 'zstd' says its 'finished' and all compressed
// data has been consumed
return (finished && compressedDirectBuf.remaining() == 0);
}
@Override
public int compress(byte[] b, int off, int len) throws IOException {
checkStream();
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
// Check if there is compressed data
int n = compressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
compressedDirectBuf.get(b, off, n);
return n;
}
// Re-initialize the output direct buffer
compressedDirectBuf.rewind();
compressedDirectBuf.limit(directBufferSize);
// Compress data
n = deflateBytesDirect(
uncompressedDirectBuf,
uncompressedDirectBufOff,
uncompressedDirectBufLen,
compressedDirectBuf,
directBufferSize
);
compressedDirectBuf.limit(n);
// Check if we have consumed all input buffer
if (uncompressedDirectBufLen - uncompressedDirectBufOff <= 0) {
// consumed all input buffer
keepUncompressedBuf = false;
uncompressedDirectBuf.clear();
uncompressedDirectBufOff = 0;
uncompressedDirectBufLen = 0;
} else {
// did not consume all input buffer
keepUncompressedBuf = true;
}
// Get at most 'len' bytes
n = Math.min(n, len);
compressedDirectBuf.get(b, off, n);
return n;
}
/**
* Returns the total number of compressed bytes output so far.
*
* @return the total (non-negative) number of compressed bytes output so far
*/
@Override
public long getBytesWritten() {
checkStream();
return bytesWritten;
}
/**
* <p>Returns the total number of uncompressed bytes input so far.</p>
*
* @return the total (non-negative) number of uncompressed bytes input so far
*/
@Override
public long getBytesRead() {
checkStream();
return bytesRead;
}
@Override
public void reset() {
checkStream();
init(level, stream);
finish = false;
finished = false;
bytesRead = 0;
bytesWritten = 0;
uncompressedDirectBuf.rewind();
uncompressedDirectBufOff = 0;
uncompressedDirectBufLen = 0;
keepUncompressedBuf = false;
compressedDirectBuf.limit(directBufferSize);
compressedDirectBuf.position(directBufferSize);
userBufOff = 0;
userBufLen = 0;
}
@Override
public void end() {
if (stream != 0) {
end(stream);
stream = 0;
}
}
private void checkStream() {
if (stream == 0) {
throw new NullPointerException();
}
}
private native static long create();
private native static void init(int level, long stream);
private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
int srcLen, ByteBuffer dst, int dstLen);
private native static int getStreamSize();
private native static void end(long strm);
private native static void initIDs();
public native static String getLibraryName();
}
| ZStandardCompressor |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/FieldMissingNullableTest.java | {
"start": 9997,
"end": 11086
} | class ____ {}
@Nullable Inner message;
public void reset() {
this.message = null;
}
}
""")
.doTest();
}
@Test
public void negativeCases_alreadyAnnotatedRecordComponent() {
createAggressiveCompilationTestHelper()
.addSourceLines(
"com/google/errorprone/bugpatterns/nullness/FieldMissingNullTest.java",
"""
package com.google.errorprone.bugpatterns.nullness;
import org.jspecify.annotations.Nullable;
record FieldMissingNullTest(@Nullable String message) {
boolean hasMessage() {
return message != null;
}
}
""")
.doTest();
}
@Test
public void negativeCases_initializeWithNonNullLiteral() {
createCompilationTestHelper()
.addSourceLines(
"com/google/errorprone/bugpatterns/nullness/FieldMissingNullTest.java",
"""
package com.google.errorprone.bugpatterns.nullness;
public | Inner |
java | quarkusio__quarkus | extensions/vertx/deployment/src/test/java/io/quarkus/vertx/CodecRegistrationTest.java | {
"start": 8628,
"end": 9017
} | class ____ {
private final String name;
CustomType1(String name) {
this.name = name;
}
public String getName() {
return name;
}
@Override
public String toString() {
return "CustomType1{" +
"name='" + name + '\'' +
'}';
}
}
static | CustomType1 |
java | apache__spark | common/kvstore/src/main/java/org/apache/spark/util/kvstore/InMemoryStore.java | {
"start": 4270,
"end": 4837
} | class ____ {
private final ConcurrentMap<Class<?>, InstanceList<?>> data = new ConcurrentHashMap<>();
@SuppressWarnings("unchecked")
public <T> InstanceList<T> get(Class<T> type) {
return (InstanceList<T>) data.get(type);
}
@SuppressWarnings("unchecked")
public <T> void write(T value) throws Exception {
InstanceList<T> list =
(InstanceList<T>) data.computeIfAbsent(value.getClass(), InstanceList::new);
list.put(value);
}
public void clear() {
data.clear();
}
}
/**
* An alias | InMemoryLists |
java | apache__camel | components/camel-graphql/src/test/java/org/apache/camel/component/graphql/server/GraphqlDataFetchers.java | {
"start": 975,
"end": 2793
} | class ____ {
private static final List<Book> BOOKS = Arrays.asList(
new Book("book-1", "Harry Potter and the Philosopher's Stone", "author-1"),
new Book("book-2", "Moby Dick", "author-2"),
new Book("book-3", "Interview with the vampire", "author-3"));
private static final List<Author> AUTHORS = Arrays.asList(
new Author("author-1", "Joanne Rowling"),
new Author("author-2", "Herman Melville"),
new Author("author-3", "Anne Rice"));
private GraphqlDataFetchers() {
}
public static DataFetcher<List<Book>> getBooksDataFetcher() {
return dataFetchingEnvironment -> BOOKS;
}
public static DataFetcher<Book> getBookByIdDataFetcher() {
return dataFetchingEnvironment -> {
String bookId = dataFetchingEnvironment.getArgument("id");
return BOOKS.stream().filter(book -> book.getId().equals(bookId)).findFirst().orElse(null);
};
}
public static DataFetcher<Author> getAuthorDataFetcher() {
return dataFetchingEnvironment -> {
Book book = dataFetchingEnvironment.getSource();
String authorId = book.getAuthorId();
return AUTHORS.stream().filter(author -> author.getId().equals(authorId)).findFirst().orElse(null);
};
}
public static DataFetcher<Book> addBookDataFetcher() {
return dataFetchingEnvironment -> {
Map<String, Object> bookInput = dataFetchingEnvironment.getArgument("bookInput");
String id = "book-" + (BOOKS.size() + 1);
String name = (String) bookInput.get("name");
String authorId = (String) bookInput.get("authorId");
Book book = new Book(id, name, authorId);
return book;
};
}
}
| GraphqlDataFetchers |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/generics/inheritance/DaoClient.java | {
"start": 89,
"end": 384
} | class ____<T> {
@Inject
Dao<T> dao;
Dao<T> anotherDao;
final Dao<T> constructorDao;
public DaoClient(Dao<T> constructorDao) {
this.constructorDao = constructorDao;
}
@Inject
void setAnotherDao(Dao<T> dao) {
this.anotherDao = dao;
}
}
| DaoClient |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/StateStoreMetricsTest.java | {
"start": 1437,
"end": 16689
} | class ____ {
private static final String TASK_ID = "test-task";
private static final String STORE_NAME = "test-store";
private static final String STORE_TYPE = "test-type";
private static final String STORE_LEVEL_GROUP = "stream-state-metrics";
private static final String BUFFER_NAME = "test-buffer";
private final Sensor expectedSensor = mock(Sensor.class);
private final StreamsMetricsImpl streamsMetrics = mock(StreamsMetricsImpl.class);
private final Map<String, String> storeTagMap = Collections.singletonMap("hello", "world");
@Test
public void shouldGetPutSensor() {
final String metricName = "put";
final String descriptionOfRate = "The average number of calls to put per second";
final String descriptionOfAvg = "The average latency of calls to put";
final String descriptionOfMax = "The maximum latency of calls to put";
setupStreamsMetrics(metricName);
getAndVerifySensor(
() -> StateStoreMetrics.putSensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax,
descriptionOfRate
);
}
@Test
public void shouldGetPutIfAbsentSensor() {
final String metricName = "put-if-absent";
final String descriptionOfRate = "The average number of calls to put-if-absent per second";
final String descriptionOfAvg = "The average latency of calls to put-if-absent";
final String descriptionOfMax = "The maximum latency of calls to put-if-absent";
setupStreamsMetrics(metricName);
getAndVerifySensor(
() -> StateStoreMetrics.putIfAbsentSensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax,
descriptionOfRate
);
}
@Test
public void shouldGetPutAllSensor() {
final String metricName = "put-all";
final String descriptionOfRate = "The average number of calls to put-all per second";
final String descriptionOfAvg = "The average latency of calls to put-all";
final String descriptionOfMax = "The maximum latency of calls to put-all";
setupStreamsMetrics(metricName);
getAndVerifySensor(
() -> StateStoreMetrics.putAllSensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax,
descriptionOfRate
);
}
@Test
public void shouldGetFetchSensor() {
final String metricName = "fetch";
final String descriptionOfRate = "The average number of calls to fetch per second";
final String descriptionOfAvg = "The average latency of calls to fetch";
final String descriptionOfMax = "The maximum latency of calls to fetch";
setupStreamsMetrics(metricName);
getAndVerifySensor(
() -> StateStoreMetrics.fetchSensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax,
descriptionOfRate
);
}
@Test
public void shouldGetGetSensor() {
final String metricName = "get";
final String descriptionOfRate = "The average number of calls to get per second";
final String descriptionOfAvg = "The average latency of calls to get";
final String descriptionOfMax = "The maximum latency of calls to get";
setupStreamsMetrics(metricName);
getAndVerifySensor(
() -> StateStoreMetrics.getSensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax,
descriptionOfRate
);
}
@Test
public void shouldGetAllSensor() {
final String metricName = "all";
final String descriptionOfRate = "The average number of calls to all per second";
final String descriptionOfAvg = "The average latency of calls to all";
final String descriptionOfMax = "The maximum latency of calls to all";
setupStreamsMetrics(metricName);
getAndVerifySensor(
() -> StateStoreMetrics.allSensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax,
descriptionOfRate
);
}
@Test
public void shouldGetRangeSensor() {
final String metricName = "range";
final String descriptionOfRate = "The average number of calls to range per second";
final String descriptionOfAvg = "The average latency of calls to range";
final String descriptionOfMax = "The maximum latency of calls to range";
setupStreamsMetrics(
metricName
);
getAndVerifySensor(
() -> StateStoreMetrics.rangeSensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax,
descriptionOfRate
);
}
@Test
public void shouldGetFlushSensor() {
final String metricName = "flush";
final String descriptionOfRate = "The average number of calls to flush per second";
final String descriptionOfAvg = "The average latency of calls to flush";
final String descriptionOfMax = "The maximum latency of calls to flush";
setupStreamsMetrics(metricName);
getAndVerifySensor(
() -> StateStoreMetrics.flushSensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax,
descriptionOfRate
);
}
@Test
public void shouldGetRemoveSensor() {
final String metricName = "remove";
final String descriptionOfRate = "The average number of calls to remove per second";
final String descriptionOfAvg = "The average latency of calls to remove";
final String descriptionOfMax = "The maximum latency of calls to remove";
setupStreamsMetrics(metricName);
getAndVerifySensor(
() -> StateStoreMetrics.removeSensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax,
descriptionOfRate
);
}
@Test
public void shouldGetDeleteSensor() {
final String metricName = "delete";
final String descriptionOfRate = "The average number of calls to delete per second";
final String descriptionOfAvg = "The average latency of calls to delete";
final String descriptionOfMax = "The maximum latency of calls to delete";
setupStreamsMetrics(metricName);
getAndVerifySensor(
() -> StateStoreMetrics.deleteSensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax,
descriptionOfRate
);
}
@Test
public void shouldGetRestoreSensor() {
final String metricName = "restore";
final String descriptionOfRate = "The average number of restorations per second";
final String descriptionOfAvg = "The average latency of restorations";
final String descriptionOfMax = "The maximum latency of restorations";
setupStreamsMetrics(metricName);
getAndVerifySensor(
() -> StateStoreMetrics.restoreSensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax,
descriptionOfRate
);
}
@Test
public void shouldGetPrefixScanSensor() {
final String metricName = "prefix-scan";
final String descriptionOfRate = "The average number of calls to prefix-scan per second";
final String descriptionOfAvg = "The average latency of calls to prefix-scan";
final String descriptionOfMax = "The maximum latency of calls to prefix-scan";
when(streamsMetrics.storeLevelSensor(TASK_ID, STORE_NAME, metricName, RecordingLevel.DEBUG))
.thenReturn(expectedSensor);
when(streamsMetrics.storeLevelTagMap(TASK_ID, STORE_TYPE, STORE_NAME)).thenReturn(storeTagMap);
try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock = mockStatic(StreamsMetricsImpl.class)) {
final Sensor sensor = StateStoreMetrics.prefixScanSensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics);
streamsMetricsStaticMock.verify(
() -> StreamsMetricsImpl.addInvocationRateToSensor(
expectedSensor,
STORE_LEVEL_GROUP,
storeTagMap,
metricName,
descriptionOfRate
)
);
streamsMetricsStaticMock.verify(
() -> StreamsMetricsImpl.addAvgAndMaxToSensor(
expectedSensor,
STORE_LEVEL_GROUP,
storeTagMap,
latencyMetricName(metricName),
descriptionOfAvg,
descriptionOfMax
)
);
assertThat(sensor, is(expectedSensor));
}
}
@Test
public void shouldGetSuppressionBufferCountSensor() {
final String metricName = "suppression-buffer-count";
final String descriptionOfAvg = "The average count of buffered records";
final String descriptionOfMax = "The maximum count of buffered records";
setupStreamsMetricsForSuppressionBufferSensor(metricName);
verifySensorSuppressionBufferSensor(
() -> StateStoreMetrics.suppressionBufferCountSensor(TASK_ID, STORE_TYPE, BUFFER_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax
);
}
@Test
public void shouldGetSuppressionBufferSizeSensor() {
final String metricName = "suppression-buffer-size";
final String descriptionOfAvg = "The average size of buffered records";
final String descriptionOfMax = "The maximum size of buffered records";
setupStreamsMetricsForSuppressionBufferSensor(metricName);
verifySensorSuppressionBufferSensor(
() -> StateStoreMetrics.suppressionBufferSizeSensor(TASK_ID, STORE_TYPE, BUFFER_NAME, streamsMetrics),
metricName,
descriptionOfAvg,
descriptionOfMax
);
}
@Test
public void shouldGetRecordE2ELatencySensor() {
final String metricName = "record-e2e-latency";
final String e2eLatencyDescription =
"end-to-end latency of a record, measuring by comparing the record timestamp with the "
+ "system time when it has been fully processed by the node";
final String descriptionOfAvg = "The average " + e2eLatencyDescription;
final String descriptionOfMin = "The minimum " + e2eLatencyDescription;
final String descriptionOfMax = "The maximum " + e2eLatencyDescription;
when(streamsMetrics.storeLevelSensor(TASK_ID, STORE_NAME, metricName, RecordingLevel.TRACE))
.thenReturn(expectedSensor);
when(streamsMetrics.storeLevelTagMap(TASK_ID, STORE_TYPE, STORE_NAME)).thenReturn(storeTagMap);
try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock = mockStatic(StreamsMetricsImpl.class)) {
final Sensor sensor =
StateStoreMetrics.e2ELatencySensor(TASK_ID, STORE_TYPE, STORE_NAME, streamsMetrics);
streamsMetricsStaticMock.verify(
() -> StreamsMetricsImpl.addAvgAndMinAndMaxToSensor(
expectedSensor,
STORE_LEVEL_GROUP,
storeTagMap,
metricName,
descriptionOfAvg,
descriptionOfMin,
descriptionOfMax
)
);
assertThat(sensor, is(expectedSensor));
}
}
private void setupStreamsMetrics(final String metricName) {
when(streamsMetrics.storeLevelSensor(TASK_ID, STORE_NAME, metricName, RecordingLevel.DEBUG))
.thenReturn(expectedSensor);
when(streamsMetrics.storeLevelTagMap(TASK_ID, STORE_TYPE, STORE_NAME)).thenReturn(storeTagMap);
}
private void getAndVerifySensor(final Supplier<Sensor> sensorSupplier,
final String metricName,
final String descriptionOfAvg,
final String descriptionOfMax,
final String descriptionOfRate) {
try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock = mockStatic(StreamsMetricsImpl.class)) {
final Sensor sensor = sensorSupplier.get();
streamsMetricsStaticMock.verify(
() -> StreamsMetricsImpl.addInvocationRateToSensor(
expectedSensor,
STORE_LEVEL_GROUP,
storeTagMap,
metricName,
descriptionOfRate
)
);
streamsMetricsStaticMock.verify(
() -> StreamsMetricsImpl.addAvgAndMaxToSensor(
expectedSensor,
STORE_LEVEL_GROUP,
storeTagMap,
latencyMetricName(metricName),
descriptionOfAvg,
descriptionOfMax
)
);
assertThat(sensor, is(expectedSensor));
}
}
private String latencyMetricName(final String metricName) {
return metricName + StreamsMetricsImpl.LATENCY_SUFFIX;
}
private void setupStreamsMetricsForSuppressionBufferSensor(final String metricName) {
when(streamsMetrics.storeLevelSensor(
TASK_ID,
BUFFER_NAME,
metricName,
RecordingLevel.DEBUG
)).thenReturn(expectedSensor);
when(streamsMetrics.storeLevelTagMap(TASK_ID, STORE_TYPE, BUFFER_NAME)).thenReturn(storeTagMap);
}
private void verifySensorSuppressionBufferSensor(final Supplier<Sensor> sensorSupplier,
final String metricName,
final String descriptionOfAvg,
final String descriptionOfMax) {
try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock = mockStatic(StreamsMetricsImpl.class)) {
final Sensor sensor = sensorSupplier.get();
streamsMetricsStaticMock.verify(
() -> StreamsMetricsImpl.addAvgAndMaxToSensor(
expectedSensor,
STORE_LEVEL_GROUP,
storeTagMap,
metricName,
descriptionOfAvg,
descriptionOfMax
)
);
assertThat(sensor, is(expectedSensor));
}
}
}
| StateStoreMetricsTest |
java | mapstruct__mapstruct | core/src/main/java/org/mapstruct/SubclassMappings.java | {
"start": 1145,
"end": 1568
} | interface ____ {
* @SubclassMapping(source = First.class, target = FirstTargetSub.class),
* @SubclassMapping(source = SecondSub.class, target = SecondTargetSub.class)
* ParentTarget toParentTarget(Parent parent);
* }
* </code></pre>
*
* @author Ben Zegveld
* @since 1.5
*/
@Target({ ElementType.METHOD, ElementType.ANNOTATION_TYPE })
@Retention(RetentionPolicy.CLASS)
@Experimental
public @ | MyMapper |
java | alibaba__fastjson | src/test/java/com/alibaba/fastjson/deserializer/issues3796/bean/ObjectD2.java | {
"start": 92,
"end": 1816
} | class ____ {
private int a;
private int b;
private int c;
private List<Integer> d;
private List<Integer> e;
private List<Integer> f;
private List<Integer> g;
private List<Integer> h;
private List<CommonObject> i;
private int j;
private int k;
private int l;
private boolean m;
private boolean n;
private int o;
public int getA() {
return a;
}
public void setA(int a) {
this.a = a;
}
public int getB() {
return b;
}
public void setB(int b) {
this.b = b;
}
public int getC() {
return c;
}
public void setC(int c) {
this.c = c;
}
public List<Integer> getD() {
return d;
}
public void setD(List<Integer> d) {
this.d = d;
}
public List<Integer> getE() {
return e;
}
public void setE(List<Integer> e) {
this.e = e;
}
public List<Integer> getF() {
return f;
}
public void setF(List<Integer> f) {
this.f = f;
}
public List<Integer> getG() {
return g;
}
public void setG(List<Integer> g) {
this.g = g;
}
public List<Integer> getH() {
return h;
}
public void setH(List<Integer> h) {
this.h = h;
}
public List<CommonObject> getI() {
return i;
}
public void setI(List<CommonObject> i) {
this.i = i;
}
public int getJ() {
return j;
}
public void setJ(int j) {
this.j = j;
}
public int getK() {
return k;
}
public void setK(int k) {
this.k = k;
}
public int getL() {
return l;
}
public void setL(int l) {
this.l = l;
}
public boolean isM() {
return m;
}
public void setM(boolean m) {
this.m = m;
}
public boolean isN() {
return n;
}
public void setN(boolean n) {
this.n = n;
}
public int getO() {
return o;
}
public void setO(int o) {
this.o = o;
}
}
| ObjectD2 |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/async/AsyncMethodsSpringTestContextIntegrationTests.java | {
"start": 1259,
"end": 1557
} | class ____ {@code -Xmx8M} to verify that there are no
* issues with memory leaks as raised in
* <a href="https://github.com/spring-projects/spring-framework/issues/23571">gh-23571</a>.
*
* @author Sam Brannen
* @since 5.2
*/
@SpringJUnitConfig
@Disabled("Only meant to be executed manually")
| with |
java | apache__kafka | server-common/src/test/java/org/apache/kafka/server/common/FinalizedFeaturesTest.java | {
"start": 1174,
"end": 2396
} | class ____ {
@Test
public void testKRaftModeFeatures() {
FinalizedFeatures finalizedFeatures = new FinalizedFeatures(MINIMUM_VERSION,
Map.of("foo", (short) 2), 123);
assertEquals(MINIMUM_VERSION.featureLevel(),
finalizedFeatures.finalizedFeatures().get(FEATURE_NAME));
assertEquals((short) 2,
finalizedFeatures.finalizedFeatures().get("foo"));
assertEquals(2, finalizedFeatures.finalizedFeatures().size());
}
@Test
public void testSetFinalizedLevel() {
FinalizedFeatures finalizedFeatures = new FinalizedFeatures(
MINIMUM_VERSION,
Map.of("foo", (short) 2),
123
);
// Override an existing finalized feature version to 0
FinalizedFeatures removedFeatures = finalizedFeatures.setFinalizedLevel("foo", (short) 0);
assertNull(removedFeatures.finalizedFeatures().get("foo"));
// Override a missing finalized feature version to 0
FinalizedFeatures sameFeatures = removedFeatures.setFinalizedLevel("foo", (short) 0);
assertEquals(sameFeatures.finalizedFeatures(), removedFeatures.finalizedFeatures());
}
}
| FinalizedFeaturesTest |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java | {
"start": 1121,
"end": 1383
} | class ____ the same parameters as constructor.
*/
protected abstract T createInternalGeoGrid(String name, int size, List<InternalGeoGridBucket> buckets, Map<String, Object> metadata);
/**
* Instantiate a {@link InternalGeoGridBucket}-derived | using |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/response/AzureAiStudioEmbeddingsResponseEntityTests.java | {
"start": 985,
"end": 2149
} | class ____ extends ESTestCase {
public void testFromResponse_CreatesResultsForASingleItem() throws IOException {
String responseJson = """
{
"object": "list",
"data": [
{
"object": "embedding",
"index": 0,
"embedding": [
0.014539449,
-0.015288644
]
}
],
"model": "text-embedding-ada-002-v2",
"usage": {
"prompt_tokens": 8,
"total_tokens": 8
}
}
""";
var entity = new AzureAiStudioEmbeddingsResponseEntity();
var parsedResults = (DenseEmbeddingFloatResults) entity.apply(
mock(Request.class),
new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8))
);
assertThat(parsedResults.embeddings(), is(List.of(DenseEmbeddingFloatResults.Embedding.of(List.of(0.014539449F, -0.015288644F)))));
}
}
| AzureAiStudioEmbeddingsResponseEntityTests |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/suite/engine/SuiteLauncherDiscoveryRequestBuilderTests.java | {
"start": 12528,
"end": 12649
} | class ____ {
}
@SelectMethod(type = OneParameterTestCase.class, name = "testMethod", parameterTypeNames = "int")
| SuiteA |
java | quarkusio__quarkus | extensions/opentelemetry/deployment/src/test/java/io/quarkus/opentelemetry/deployment/OpenTelemetryDestroyerTest.java | {
"start": 590,
"end": 1795
} | class ____ {
@RegisterExtension
final static QuarkusDevModeTest TEST = new QuarkusDevModeTest()
.withApplicationRoot((jar) -> jar
.addClasses(TestSpanExporter.class,
TestSpanExporterProvider.class,
HelloResource.class)
.addAsResource(new StringAsset(TestSpanExporterProvider.class.getCanonicalName()),
"META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider")
.add(new StringAsset(
"""
quarkus.otel.traces.exporter=test-span-exporter
quarkus.otel.metrics.exporter=none
quarkus.otel.experimental.shutdown-wait-time=PT60S
"""),
"application.properties"));
@Test
void getShutdownWaitTime() {
RestAssured.when()
.get("/hello").then()
.statusCode(200)
.body(is("PT1M"));
}
@Path("/hello")
public static | OpenTelemetryDestroyerTest |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/support/discovery/DiscoveryIssueReporter.java | {
"start": 1139,
"end": 4726
} | interface ____ {
/**
* Create a new {@code DiscoveryIssueReporter} that reports issues to the
* supplied {@link EngineDiscoveryListener} for the specified engine.
*
* @param engineDiscoveryListener the listener to report issues to; never
* {@code null}
* @param engineId the unique identifier of the engine; never {@code null}
*/
static DiscoveryIssueReporter forwarding(EngineDiscoveryListener engineDiscoveryListener, UniqueId engineId) {
Preconditions.notNull(engineDiscoveryListener, "engineDiscoveryListener must not be null");
Preconditions.notNull(engineId, "engineId must not be null");
return issue -> engineDiscoveryListener.issueEncountered(engineId, issue);
}
/**
* Create a new {@code DiscoveryIssueReporter} that adds reported issues to
* the supplied collection.
*
* @param collection the collection to add issues to; never {@code null}
*/
static DiscoveryIssueReporter collecting(Collection<? super DiscoveryIssue> collection) {
Preconditions.notNull(collection, "collection must not be null");
return consuming(collection::add);
}
/**
* Create a new {@code DiscoveryIssueReporter} that adds reported issues to
* the supplied consumer.
*
* @param consumer the consumer to report issues to; never {@code null}
*/
static DiscoveryIssueReporter consuming(Consumer<? super DiscoveryIssue> consumer) {
Preconditions.notNull(consumer, "consumer must not be null");
return consumer::accept;
}
/**
* Create a new {@code DiscoveryIssueReporter} that avoids reporting
* duplicate issues.
*
* <p>The implementation returned by this method is not thread-safe.
*
* @param delegate the delegate to forward issues to; never {@code null}
*/
static DiscoveryIssueReporter deduplicating(DiscoveryIssueReporter delegate) {
Preconditions.notNull(delegate, "delegate must not be null");
Set<DiscoveryIssue> seen = new HashSet<>();
return issue -> {
boolean notSeen = seen.add(issue);
if (notSeen) {
delegate.reportIssue(issue);
}
};
}
/**
* Build the supplied {@link DiscoveryIssue.Builder Builder} and report the
* resulting {@link DiscoveryIssue}.
*/
default void reportIssue(DiscoveryIssue.Builder builder) {
reportIssue(builder.build());
}
/**
* Report the supplied {@link DiscoveryIssue}.
*/
void reportIssue(DiscoveryIssue issue);
/**
* Create a {@link Condition} that reports a {@link DiscoveryIssue} when the
* supplied {@link Predicate} is not met.
*
* @param predicate the predicate to test; never {@code null}
* @param issueCreator the function to create the issue with; never {@code null}
* @return a new {@code Condition}; never {@code null}
*/
default <T> Condition<T> createReportingCondition(Predicate<T> predicate,
Function<T, DiscoveryIssue> issueCreator) {
Preconditions.notNull(predicate, "predicate must not be null");
Preconditions.notNull(issueCreator, "issueCreator must not be null");
return value -> {
if (predicate.test(value)) {
return true;
}
else {
reportIssue(issueCreator.apply(value));
return false;
}
};
}
/**
* A {@code Condition} is a union of {@link Predicate} and {@link Consumer}.
*
* <p>Instances of this type may be used as {@link Predicate Predicates} or
* {@link Consumer Consumers}. For example, a {@code Condition} may be
* passed to {@link java.util.stream.Stream#filter(Predicate)} if it is used
* for filtering, or to {@link java.util.stream.Stream#peek(Consumer)} if it
* is only used for reporting or other side effects.
*
* <p>This | DiscoveryIssueReporter |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestReadAndSeekPageBlobAfterWrite.java | {
"start": 1585,
"end": 12027
} | class ____ extends AbstractAzureScaleTest {
private static final Logger LOG =
LoggerFactory.getLogger(ITestReadAndSeekPageBlobAfterWrite.class);
private FileSystem fs;
private byte[] randomData;
// Page blob physical page size
private static final int PAGE_SIZE = PageBlobFormatHelpers.PAGE_SIZE;
// Size of data on page (excluding header)
private static final int PAGE_DATA_SIZE = PAGE_SIZE - PageBlobFormatHelpers.PAGE_HEADER_SIZE;
private static final int MAX_BYTES = 33554432; // maximum bytes in a file that we'll test
private static final int MAX_PAGES = MAX_BYTES / PAGE_SIZE; // maximum number of pages we'll test
private Random rand = new Random();
// A key with a prefix under /pageBlobs, which for the test file system will
// force use of a page blob.
private static final String KEY = "/pageBlobs/file.dat";
// path of page blob file to read and write
private Path blobPath;
@BeforeEach
@Override
public void setUp() throws Exception {
super.setUp();
fs = getTestAccount().getFileSystem();
// Make sure we are using an integral number of pages.
assertEquals(0, MAX_BYTES % PAGE_SIZE);
// load an in-memory array of random data
randomData = new byte[PAGE_SIZE * MAX_PAGES];
rand.nextBytes(randomData);
blobPath = blobPath("ITestReadAndSeekPageBlobAfterWrite");
}
@Override
public void tearDown() throws Exception {
deleteQuietly(fs, blobPath, true);
super.tearDown();
}
/**
* Make sure the file name (key) is a page blob file name. If anybody changes that,
* we need to come back and update this test class.
*/
@Test
public void testIsPageBlobFileName() {
AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
String[] a = blobPath.toUri().getPath().split("/");
String key2 = a[1] + "/";
assertTrue(store.isPageBlobKey(key2), "Not a page blob: " + blobPath);
}
/**
* For a set of different file sizes, write some random data to a page blob,
* read it back, and compare that what was read is the same as what was written.
*/
@Test
public void testReadAfterWriteRandomData() throws IOException {
// local shorthand
final int pds = PAGE_DATA_SIZE;
// Test for sizes at and near page boundaries
int[] dataSizes = {
// on first page
0, 1, 2, 3,
// Near first physical page boundary (because the implementation
// stores PDS + the page header size bytes on each page).
pds - 1, pds, pds + 1, pds + 2, pds + 3,
// near second physical page boundary
(2 * pds) - 1, (2 * pds), (2 * pds) + 1, (2 * pds) + 2, (2 * pds) + 3,
// near tenth physical page boundary
(10 * pds) - 1, (10 * pds), (10 * pds) + 1, (10 * pds) + 2, (10 * pds) + 3,
// test one big size, >> 4MB (an internal buffer size in the code)
MAX_BYTES
};
for (int i : dataSizes) {
testReadAfterWriteRandomData(i);
}
}
private void testReadAfterWriteRandomData(int size) throws IOException {
writeRandomData(size);
readRandomDataAndVerify(size);
}
/**
* Read "size" bytes of data and verify that what was read and what was written
* are the same.
*/
private void readRandomDataAndVerify(int size) throws AzureException, IOException {
byte[] b = new byte[size];
FSDataInputStream stream = fs.open(blobPath);
int bytesRead = stream.read(b);
stream.close();
assertEquals(bytesRead, size);
// compare the data read to the data written
assertTrue(comparePrefix(randomData, b, size));
}
// return true if the beginning "size" values of the arrays are the same
private boolean comparePrefix(byte[] a, byte[] b, int size) {
if (a.length < size || b.length < size) {
return false;
}
for (int i = 0; i < size; i++) {
if (a[i] != b[i]) {
return false;
}
}
return true;
}
// Write a specified amount of random data to the file path for this test class.
private void writeRandomData(int size) throws IOException {
OutputStream output = fs.create(blobPath);
output.write(randomData, 0, size);
output.close();
}
/**
* Write data to a page blob, open it, seek, and then read a range of data.
* Then compare that the data read from that range is the same as the data originally written.
*/
@Test
public void testPageBlobSeekAndReadAfterWrite() throws IOException {
writeRandomData(PAGE_SIZE * MAX_PAGES);
int recordSize = 100;
byte[] b = new byte[recordSize];
try(FSDataInputStream stream = fs.open(blobPath)) {
// Seek to a boundary around the middle of the 6th page
int seekPosition = 5 * PAGE_SIZE + 250;
stream.seek(seekPosition);
// Read a record's worth of bytes and verify results
int bytesRead = stream.read(b);
verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
// Seek to another spot and read a record greater than a page
seekPosition = 10 * PAGE_SIZE + 250;
stream.seek(seekPosition);
recordSize = 1000;
b = new byte[recordSize];
bytesRead = stream.read(b);
verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
// Read the last 100 bytes of the file
recordSize = 100;
seekPosition = PAGE_SIZE * MAX_PAGES - recordSize;
stream.seek(seekPosition);
b = new byte[recordSize];
bytesRead = stream.read(b);
verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
// Read past the end of the file and we should get only partial data.
recordSize = 100;
seekPosition = PAGE_SIZE * MAX_PAGES - recordSize + 50;
stream.seek(seekPosition);
b = new byte[recordSize];
bytesRead = stream.read(b);
assertEquals(50, bytesRead);
// compare last 50 bytes written with those read
byte[] tail = Arrays.copyOfRange(randomData, seekPosition, randomData.length);
assertTrue(comparePrefix(tail, b, 50));
}
}
// Verify that reading a record of data after seeking gives the expected data.
private void verifyReadRandomData(byte[] b, int bytesRead, int seekPosition, int recordSize) {
byte[] originalRecordData =
Arrays.copyOfRange(randomData, seekPosition, seekPosition + recordSize + 1);
assertEquals(recordSize, bytesRead);
assertTrue(comparePrefix(originalRecordData, b, recordSize));
}
// Test many small flushed writes interspersed with periodic hflush calls.
// For manual testing, increase NUM_WRITES to a large number.
// The goal for a long-running manual test is to make sure that it finishes
// and the close() call does not time out. It also facilitates debugging into
// hflush/hsync.
@Test
public void testManySmallWritesWithHFlush() throws IOException {
writeAndReadOneFile(50, 100, 20);
}
/**
* Write a total of numWrites * recordLength data to a file, read it back,
* and check to make sure what was read is the same as what was written.
* The syncInterval is the number of writes after which to call hflush to
* force the data to storage.
*/
private void writeAndReadOneFile(int numWrites,
int recordLength, int syncInterval) throws IOException {
// A lower bound on the minimum time we think it will take to do
// a write to Azure storage.
final long MINIMUM_EXPECTED_TIME = 20;
LOG.info("Writing " + numWrites * recordLength + " bytes to " + blobPath.getName());
FSDataOutputStream output = fs.create(blobPath);
int writesSinceHFlush = 0;
try {
// Do a flush and hflush to exercise case for empty write queue in PageBlobOutputStream,
// to test concurrent execution gates.
output.flush();
output.hflush();
for (int i = 0; i < numWrites; i++) {
output.write(randomData, i * recordLength, recordLength);
writesSinceHFlush++;
output.flush();
if ((i % syncInterval) == 0) {
output.hflush();
writesSinceHFlush = 0;
}
}
} finally {
long start = Time.monotonicNow();
output.close();
long end = Time.monotonicNow();
LOG.debug("close duration = " + (end - start) + " msec.");
if (writesSinceHFlush > 0) {
assertTrue(end - start >= MINIMUM_EXPECTED_TIME, String.format(
"close duration with >= 1 pending write is %d, less than minimum expected of %d",
end - start, MINIMUM_EXPECTED_TIME));
}
}
// Read the data back and check it.
FSDataInputStream stream = fs.open(blobPath);
int SIZE = numWrites * recordLength;
byte[] b = new byte[SIZE];
try {
stream.seek(0);
stream.read(b, 0, SIZE);
verifyReadRandomData(b, SIZE, 0, SIZE);
} finally {
stream.close();
}
// delete the file
fs.delete(blobPath, false);
}
// Test writing to a large file repeatedly as a stress test.
// Set the repetitions to a larger number for manual testing
// for a longer stress run.
@Test
public void testLargeFileStress() throws IOException {
int numWrites = 32;
int recordSize = 1024 * 1024;
int syncInterval = 10;
int repetitions = 1;
for (int i = 0; i < repetitions; i++) {
writeAndReadOneFile(numWrites, recordSize, syncInterval);
}
}
// Write to a file repeatedly to verify that it extends.
// The page blob file should start out at 128MB and finish at 256MB.
public void testFileSizeExtension() throws IOException {
final int writeSize = 1024 * 1024;
final int numWrites = 129;
final byte dataByte = 5;
byte[] data = new byte[writeSize];
Arrays.fill(data, dataByte);
try (FSDataOutputStream output = fs.create(blobPath)) {
for (int i = 0; i < numWrites; i++) {
output.write(data);
output.hflush();
LOG.debug("total writes = " + (i + 1));
}
}
// Show that we wrote more than the default page blob file size.
assertTrue(numWrites * writeSize > PageBlobOutputStream.PAGE_BLOB_MIN_SIZE);
// Verify we can list the new size. That will prove we expanded the file.
FileStatus[] status = fs.listStatus(blobPath);
assertEquals(numWrites * writeSize, status[0].getLen(),
"File size hasn't changed " + status);
LOG.debug("Total bytes written to " + blobPath + " = " + status[0].getLen());
fs.delete(blobPath, false);
}
}
| ITestReadAndSeekPageBlobAfterWrite |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LoggerUsagePrecommitPlugin.java | {
"start": 897,
"end": 2097
} | class ____ extends PrecommitPlugin {
@Override
public TaskProvider<? extends Task> createTask(Project project) {
Configuration loggerUsageConfig = project.getConfigurations().create("loggerUsagePlugin");
// this makes it easier to test by not requiring this project to be always available in our
// test sample projects
if (project.findProject(":test:logger-usage") != null) {
project.getDependencies().add("loggerUsagePlugin", project.project(":test:logger-usage"));
}
TaskProvider<LoggerUsageTask> loggerUsage = project.getTasks().register("loggerUsageCheck", LoggerUsageTask.class);
SourceSetContainer sourceSets = project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets();
sourceSets.matching(
sourceSet -> sourceSet.getName().equals(SourceSet.MAIN_SOURCE_SET_NAME)
|| sourceSet.getName().equals(SourceSet.TEST_SOURCE_SET_NAME)
).all(sourceSet -> loggerUsage.configure(t -> t.addSourceSet(sourceSet)));
loggerUsage.configure(
t -> t.setClasspath(loggerUsageConfig)
);
return loggerUsage;
}
}
| LoggerUsagePrecommitPlugin |
java | grpc__grpc-java | xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java | {
"start": 67798,
"end": 68234
} | class ____ extends AbstractTestHelper {
public TestHelper() {
super(fakeClock, syncContext);
}
@Override
public Map<List<EquivalentAddressGroup>, Subchannel> getSubchannelMap() {
return subchannels;
}
@Override
public MetricRecorder getMetricRecorder() {
return mockMetricRecorder;
}
@Override
public String getChannelTarget() {
return channelTarget;
}
}
}
| TestHelper |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/jaxrs/UriInfoImpl.java | {
"start": 1069,
"end": 7575
} | class ____ implements UriInfo {
private final ResteasyReactiveRequestContext currentRequest;
private MultivaluedMap<String, String> queryParams;
// marker for which target the pathParams where created, may be null when getPathParams was never called
private RuntimeResource pathParamsTargetMarker;
private MultivaluedMap<String, String> pathParams;
private URI requestUri;
public UriInfoImpl(ResteasyReactiveRequestContext currentRequest) {
this.currentRequest = currentRequest;
}
@Override
public String getPath() {
return getPath(true);
}
@Override
public String getPath(boolean decode) {
if (!decode) {
throw encodedNotSupported();
}
// TCK says normalized
String path = URIDecoder.decodeURIComponent(currentRequest.getPath(), false);
// the path must not contain the prefix
String prefix = currentRequest.getDeployment().getPrefix();
if (prefix.isEmpty()) {
return path;
}
// else skip the prefix
if (path.length() == prefix.length()) {
return "/";
}
return path.substring(prefix.length());
}
@Override
public List<PathSegment> getPathSegments() {
return getPathSegments(true);
}
@Override
public List<PathSegment> getPathSegments(boolean decode) {
if (!decode) {
throw encodedNotSupported();
}
return PathSegmentImpl.parseSegments(getPath(), decode);
}
@Override
public URI getRequestUri() {
if (requestUri == null) {
ServerHttpRequest request = currentRequest.serverRequest();
try {
// TCK says normalized
requestUri = new URI(currentRequest.getAbsoluteURI())
.normalize();
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
return requestUri;
}
@Override
public UriBuilder getRequestUriBuilder() {
return UriBuilder.fromUri(getRequestUri());
}
@Override
public URI getAbsolutePath() {
try {
// TCK says normalized
String effectiveURI = currentRequest.getAbsoluteURI();
int queryParamsIndex = effectiveURI.indexOf('?');
if (queryParamsIndex > 0) {
// the spec says that getAbsolutePath() does not contain query parameters
effectiveURI = effectiveURI.substring(0, queryParamsIndex);
}
return new URI(effectiveURI).normalize();
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
@Override
public UriBuilder getAbsolutePathBuilder() {
return UriBuilder.fromUri(getAbsolutePath());
}
@Override
public URI getBaseUri() {
return LocationUtil.getUri("", currentRequest, true);
}
@Override
public UriBuilder getBaseUriBuilder() {
return UriBuilder.fromUri(getBaseUri());
}
@Override
public MultivaluedMap<String, String> getPathParameters() {
return getPathParameters(true);
}
@Override
public MultivaluedMap<String, String> getPathParameters(boolean decode) {
if (!decode) {
throw encodedNotSupported();
}
// pathParams have to be recreated when the target changes.
// this happens e.g. when the ResteasyReactiveRequestContext#restart is called for sub resources
// The sub resource, can have additional path params that are not present on the locator
if (pathParams == null && pathParamsTargetMarker == null || pathParamsTargetMarker != currentRequest.getTarget()) {
pathParams = currentRequest.getAllPathParameters(false);
pathParamsTargetMarker = currentRequest.getTarget();
}
return new UnmodifiableMultivaluedMap<>(pathParams);
}
private RuntimeException encodedNotSupported() {
return new IllegalArgumentException("We do not support non-decoded parameters");
}
@Override
public MultivaluedMap<String, String> getQueryParameters() {
return getQueryParameters(true);
}
@Override
public MultivaluedMap<String, String> getQueryParameters(boolean decode) {
if (!decode) {
throw encodedNotSupported();
}
if (queryParams == null) {
queryParams = new QuarkusMultivaluedHashMap<>();
Collection<String> entries = currentRequest.serverRequest().queryParamNames();
for (String i : entries) {
queryParams.addAll(i, currentRequest.serverRequest().getAllQueryParams(i));
}
}
return new UnmodifiableMultivaluedMap<>(queryParams);
}
@Override
public List<String> getMatchedURIs() {
return getMatchedURIs(true);
}
@Override
public List<String> getMatchedURIs(boolean decode) {
if (!decode) {
throw encodedNotSupported();
}
if (currentRequest.getTarget() == null) {
return Collections.emptyList();
}
List<UriMatch> oldMatches = currentRequest.getMatchedURIs();
List<String> matched = new ArrayList<>();
String last = null;
for (int i = 0; i < oldMatches.size(); ++i) {
String m = oldMatches.get(i).matched;
if (!m.equals(last)) {
matched.add(m);
last = m;
}
}
return matched;
}
@Override
public List<Object> getMatchedResources() {
List<UriMatch> oldMatches = currentRequest.getMatchedURIs();
List<Object> matched = new ArrayList<>();
for (int i = 0; i < oldMatches.size(); ++i) {
Object target = oldMatches.get(i).target;
if (target != null) {
matched.add(target);
}
}
return matched;
}
@Override
public URI resolve(URI uri) {
return getBaseUri().resolve(uri);
}
@Override
public URI relativize(URI uri) {
URI from = getRequestUri();
URI to = uri;
if (uri.getScheme() == null && uri.getHost() == null) {
to = getBaseUriBuilder().replaceQuery(null).path(uri.getPath()).replaceQuery(uri.getQuery())
.fragment(uri.getFragment()).build();
}
return UriBuilderImpl.relativize(from, to);
}
}
| UriInfoImpl |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ActivateAutoFollowPatternAction.java | {
"start": 842,
"end": 1219
} | class ____ extends ActionType<AcknowledgedResponse> {
public static final String NAME = "cluster:admin/xpack/ccr/auto_follow_pattern/activate";
public static final ActivateAutoFollowPatternAction INSTANCE = new ActivateAutoFollowPatternAction();
private ActivateAutoFollowPatternAction() {
super(NAME);
}
public static | ActivateAutoFollowPatternAction |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/SortAgnostic.java | {
"start": 635,
"end": 1956
} | interface ____ used between two sorts,
* then we can assume that
* <p>
* <code>
* | SORT x, y, z | MY_COMMAND | SORT a, b, c
* </code>
* <p>
* is equivalent to
* <p>
* <code>
* | MY_COMMAND | SORT a, b, c
* </code>
*
* <hr>
* <p>
*
* Example 2: commands that make previous order irrelevant, eg. because they collapse the results;
* STATS is one of them, eg.
*
* <p>
* <code>
* | SORT x, y, z | STATS count(*)
* </code>
* <p>
* is equivalent to
* <p>
* <code>
* | STATS count(*)
* </code>
* <p>
*
* and if MY_COMMAND implements this interface, then
*
* <p>
* <code>
* | SORT x, y, z | MY_COMMAND | STATS count(*)
* </code>
* <p>
* is equivalent to
* <p>
* <code>
* | MY_COMMAND | STATS count(*)
* </code>
*
* <hr>
* <p>
*
* In all the other cases, eg. if the command does not implement this interface
* then we assume that the previous SORT is still relevant and cannot be pruned.
*
* <hr>
* <p>
*
* Eg. LIMIT does <b>not</b> implement this interface, because
*
* <p>
* <code>
* | SORT x, y, z | LIMIT 10 | SORT a, b, c
* </code>
* <p>
* is <b>NOT</b> equivalent to
* <p>
* <code>
* | LIMIT 10 | SORT a, b, c
* </code>
*
* <hr>
* <p>
*
* For n-ary plans that implement this interface,
* we assume that the above applies to all the children
*
*/
public | is |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ServerEndpointBuilderFactory.java | {
"start": 1634,
"end": 19432
} | interface ____
extends
EndpointConsumerBuilder {
default AdvancedServerEndpointConsumerBuilder advanced() {
return (AdvancedServerEndpointConsumerBuilder) this;
}
/**
* Data module options.
*
* The option is a:
* <code>org.eclipse.neoscada.protocol.iec60870.client.data.DataModuleOptions</code> type.
*
* Group: common
*
* @param dataModuleOptions the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder dataModuleOptions(org.eclipse.neoscada.protocol.iec60870.client.data.DataModuleOptions dataModuleOptions) {
doSetProperty("dataModuleOptions", dataModuleOptions);
return this;
}
/**
* Data module options.
*
* The option will be converted to a
* <code>org.eclipse.neoscada.protocol.iec60870.client.data.DataModuleOptions</code> type.
*
* Group: common
*
* @param dataModuleOptions the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder dataModuleOptions(String dataModuleOptions) {
doSetProperty("dataModuleOptions", dataModuleOptions);
return this;
}
/**
* Filter out all requests which don't have the execute bit set.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param filterNonExecute the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder filterNonExecute(boolean filterNonExecute) {
doSetProperty("filterNonExecute", filterNonExecute);
return this;
}
/**
* Filter out all requests which don't have the execute bit set.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param filterNonExecute the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder filterNonExecute(String filterNonExecute) {
doSetProperty("filterNonExecute", filterNonExecute);
return this;
}
/**
* Protocol options.
*
* The option is a:
* <code>org.eclipse.neoscada.protocol.iec60870.ProtocolOptions</code>
* type.
*
* Group: common
*
* @param protocolOptions the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder protocolOptions(org.eclipse.neoscada.protocol.iec60870.ProtocolOptions protocolOptions) {
doSetProperty("protocolOptions", protocolOptions);
return this;
}
/**
* Protocol options.
*
* The option will be converted to a
* <code>org.eclipse.neoscada.protocol.iec60870.ProtocolOptions</code>
* type.
*
* Group: common
*
* @param protocolOptions the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder protocolOptions(String protocolOptions) {
doSetProperty("protocolOptions", protocolOptions);
return this;
}
/**
* Parameter W - Acknowledgment window.
*
* The option is a: <code>short</code> type.
*
* Default: 10
* Group: connection
*
* @param acknowledgeWindow the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder acknowledgeWindow(short acknowledgeWindow) {
doSetProperty("acknowledgeWindow", acknowledgeWindow);
return this;
}
/**
* Parameter W - Acknowledgment window.
*
* The option will be converted to a <code>short</code> type.
*
* Default: 10
* Group: connection
*
* @param acknowledgeWindow the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder acknowledgeWindow(String acknowledgeWindow) {
doSetProperty("acknowledgeWindow", acknowledgeWindow);
return this;
}
/**
* The common ASDU address size. May be either SIZE_1 or SIZE_2.
*
* The option is a:
* <code>org.eclipse.neoscada.protocol.iec60870.ASDUAddressType</code>
* type.
*
* Group: connection
*
* @param adsuAddressType the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder adsuAddressType(org.eclipse.neoscada.protocol.iec60870.ASDUAddressType adsuAddressType) {
doSetProperty("adsuAddressType", adsuAddressType);
return this;
}
/**
* The common ASDU address size. May be either SIZE_1 or SIZE_2.
*
* The option will be converted to a
* <code>org.eclipse.neoscada.protocol.iec60870.ASDUAddressType</code>
* type.
*
* Group: connection
*
* @param adsuAddressType the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder adsuAddressType(String adsuAddressType) {
doSetProperty("adsuAddressType", adsuAddressType);
return this;
}
/**
* The cause of transmission type. May be either SIZE_1 or SIZE_2.
*
* The option is a:
* <code>org.eclipse.neoscada.protocol.iec60870.CauseOfTransmissionType</code> type.
*
* Group: connection
*
* @param causeOfTransmissionType the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder causeOfTransmissionType(org.eclipse.neoscada.protocol.iec60870.CauseOfTransmissionType causeOfTransmissionType) {
doSetProperty("causeOfTransmissionType", causeOfTransmissionType);
return this;
}
/**
* The cause of transmission type. May be either SIZE_1 or SIZE_2.
*
* The option will be converted to a
* <code>org.eclipse.neoscada.protocol.iec60870.CauseOfTransmissionType</code> type.
*
* Group: connection
*
* @param causeOfTransmissionType the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder causeOfTransmissionType(String causeOfTransmissionType) {
doSetProperty("causeOfTransmissionType", causeOfTransmissionType);
return this;
}
/**
* The information address size. May be either SIZE_1, SIZE_2 or SIZE_3.
*
* The option is a:
* <code>org.eclipse.neoscada.protocol.iec60870.InformationObjectAddressType</code> type.
*
* Group: connection
*
* @param informationObjectAddressType the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder informationObjectAddressType(org.eclipse.neoscada.protocol.iec60870.InformationObjectAddressType informationObjectAddressType) {
doSetProperty("informationObjectAddressType", informationObjectAddressType);
return this;
}
/**
* The information address size. May be either SIZE_1, SIZE_2 or SIZE_3.
*
* The option will be converted to a
* <code>org.eclipse.neoscada.protocol.iec60870.InformationObjectAddressType</code> type.
*
* Group: connection
*
* @param informationObjectAddressType the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder informationObjectAddressType(String informationObjectAddressType) {
doSetProperty("informationObjectAddressType", informationObjectAddressType);
return this;
}
/**
* Parameter K - Maximum number of un-acknowledged messages.
*
* The option is a: <code>short</code> type.
*
* Default: 15
* Group: connection
*
* @param maxUnacknowledged the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder maxUnacknowledged(short maxUnacknowledged) {
doSetProperty("maxUnacknowledged", maxUnacknowledged);
return this;
}
/**
* Parameter K - Maximum number of un-acknowledged messages.
*
* The option will be converted to a <code>short</code> type.
*
* Default: 15
* Group: connection
*
* @param maxUnacknowledged the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder maxUnacknowledged(String maxUnacknowledged) {
doSetProperty("maxUnacknowledged", maxUnacknowledged);
return this;
}
/**
* Timeout T1 in milliseconds.
*
* The option is a: <code>int</code> type.
*
* Default: 15000
* Group: connection
*
* @param timeout1 the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder timeout1(int timeout1) {
doSetProperty("timeout1", timeout1);
return this;
}
/**
* Timeout T1 in milliseconds.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 15000
* Group: connection
*
* @param timeout1 the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder timeout1(String timeout1) {
doSetProperty("timeout1", timeout1);
return this;
}
/**
* Timeout T2 in milliseconds.
*
* The option is a: <code>int</code> type.
*
* Default: 10000
* Group: connection
*
* @param timeout2 the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder timeout2(int timeout2) {
doSetProperty("timeout2", timeout2);
return this;
}
/**
* Timeout T2 in milliseconds.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 10000
* Group: connection
*
* @param timeout2 the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder timeout2(String timeout2) {
doSetProperty("timeout2", timeout2);
return this;
}
/**
* Timeout T3 in milliseconds.
*
* The option is a: <code>int</code> type.
*
* Default: 20000
* Group: connection
*
* @param timeout3 the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder timeout3(int timeout3) {
doSetProperty("timeout3", timeout3);
return this;
}
/**
* Timeout T3 in milliseconds.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 20000
* Group: connection
*
* @param timeout3 the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder timeout3(String timeout3) {
doSetProperty("timeout3", timeout3);
return this;
}
/**
* Whether to include the source address.
*
* The option is a: <code>byte</code> type.
*
* Group: data
*
* @param causeSourceAddress the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder causeSourceAddress(byte causeSourceAddress) {
doSetProperty("causeSourceAddress", causeSourceAddress);
return this;
}
/**
* Whether to include the source address.
*
* The option will be converted to a <code>byte</code> type.
*
* Group: data
*
* @param causeSourceAddress the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder causeSourceAddress(String causeSourceAddress) {
doSetProperty("causeSourceAddress", causeSourceAddress);
return this;
}
/**
* Timeout in millis to wait for client to establish a connected
* connection.
*
* The option is a: <code>int</code> type.
*
* Default: 10000
* Group: data
*
* @param connectionTimeout the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder connectionTimeout(int connectionTimeout) {
doSetProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Timeout in millis to wait for client to establish a connected
* connection.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 10000
* Group: data
*
* @param connectionTimeout the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder connectionTimeout(String connectionTimeout) {
doSetProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Whether background scan transmissions should be ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: data
*
* @param ignoreBackgroundScan the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder ignoreBackgroundScan(boolean ignoreBackgroundScan) {
doSetProperty("ignoreBackgroundScan", ignoreBackgroundScan);
return this;
}
/**
* Whether background scan transmissions should be ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: data
*
* @param ignoreBackgroundScan the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder ignoreBackgroundScan(String ignoreBackgroundScan) {
doSetProperty("ignoreBackgroundScan", ignoreBackgroundScan);
return this;
}
/**
* Whether to ignore or respect DST.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: data
*
* @param ignoreDaylightSavingTime the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder ignoreDaylightSavingTime(boolean ignoreDaylightSavingTime) {
doSetProperty("ignoreDaylightSavingTime", ignoreDaylightSavingTime);
return this;
}
/**
* Whether to ignore or respect DST.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: data
*
* @param ignoreDaylightSavingTime the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder ignoreDaylightSavingTime(String ignoreDaylightSavingTime) {
doSetProperty("ignoreDaylightSavingTime", ignoreDaylightSavingTime);
return this;
}
/**
* The timezone to use. May be any Java time zone string.
*
* The option is a: <code>java.util.TimeZone</code> type.
*
* Default: UTC
* Group: data
*
* @param timeZone the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder timeZone(TimeZone timeZone) {
doSetProperty("timeZone", timeZone);
return this;
}
/**
* The timezone to use. May be any Java time zone string.
*
* The option will be converted to a <code>java.util.TimeZone</code>
* type.
*
* Default: UTC
* Group: data
*
* @param timeZone the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder timeZone(String timeZone) {
doSetProperty("timeZone", timeZone);
return this;
}
/**
* An identifier grouping connection instances.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: id
*
* @param connectionId the value to set
* @return the dsl builder
*/
default ServerEndpointConsumerBuilder connectionId(String connectionId) {
doSetProperty("connectionId", connectionId);
return this;
}
}
/**
* Advanced builder for endpoint consumers for the IEC 60870 Server component.
*/
public | ServerEndpointConsumerBuilder |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/Timestream2EndpointBuilderFactory.java | {
"start": 22736,
"end": 38891
} | class ____ {
/**
* The internal instance of the builder used to access to all the
* methods representing the name of headers.
*/
private static final Timestream2HeaderNameBuilder INSTANCE = new Timestream2HeaderNameBuilder();
/**
* The operation we want to perform.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamOperation}.
*/
public String awsTimestreamOperation() {
return "CamelAwsTimestreamOperation";
}
/**
* Represents a time-series data point being written into Timestream.
*
* The option is a: {@code
* software.amazon.awssdk.services.timestreamwrite.model.Record} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamRecord}.
*/
public String awsTimestreamRecord() {
return "CamelAwsTimestreamRecord";
}
/**
* List of Records.
*
* The option is a: {@code List} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamRecordList}.
*/
public String awsTimestreamRecordList() {
return "CamelAwsTimestreamRecordList";
}
/**
* Status of Batch Load Task.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamTaskStatus}.
*/
public String awsTimestreamTaskStatus() {
return "CamelAwsTimestreamTaskStatus";
}
/**
* The ID of the batch load task to resume.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamTaskId}.
*/
public String awsTimestreamTaskId() {
return "CamelAwsTimestreamTaskId";
}
/**
* Name of Database.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamDatabaseName}.
*/
public String awsTimestreamDatabaseName() {
return "CamelAwsTimestreamDatabaseName";
}
/**
* Name of Table.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamTableName}.
*/
public String awsTimestreamTableName() {
return "CamelAwsTimestreamTableName";
}
/**
* Name of Target Database.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamTargetDatabaseName}.
*/
public String awsTimestreamTargetDatabaseName() {
return "CamelAwsTimestreamTargetDatabaseName";
}
/**
* Name of Target Table.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamTargetTableName}.
*/
public String awsTimestreamTargetTableName() {
return "CamelAwsTimestreamTargetTableName";
}
/**
* Record version.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamRecordVersion}.
*/
public String awsTimestreamRecordVersion() {
return "CamelAwsTimestreamRecordVersion";
}
/**
* Configuration of Data Model.
*
* The option is a: {@code
* software.amazon.awssdk.services.timestreamwrite.model.DataModelConfiguration} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamDataModelConfiguration}.
*/
public String awsTimestreamDataModelConfiguration() {
return "CamelAwsTimestreamDataModelConfiguration";
}
/**
* Configuration of Data Source.
*
* The option is a: {@code
* software.amazon.awssdk.services.timestreamwrite.model.DataSourceConfiguration} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamDataSourceConfiguration}.
*/
public String awsTimestreamDataSourceConfiguration() {
return "CamelAwsTimestreamDataSourceConfiguration";
}
/**
* Reporting Configuration.
*
* The option is a: {@code
* software.amazon.awssdk.services.timestreamwrite.model.ReportConfiguration} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamReportConfiguration}.
*/
public String awsTimestreamReportConfiguration() {
return "CamelAwsTimestreamReportConfiguration";
}
/**
* Timestream Table Schema.
*
* The option is a: {@code
* software.amazon.awssdk.services.timestreamwrite.model.Schema} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamTableSchema}.
*/
public String awsTimestreamTableSchema() {
return "CamelAwsTimestreamTableSchema";
}
/**
* Timestream Table Retention Properties.
*
* The option is a: {@code
* software.amazon.awssdk.services.timestreamwrite.model.RetentionProperties} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamRetentionProperties}.
*/
public String awsTimestreamRetentionProperties() {
return "CamelAwsTimestreamRetentionProperties";
}
/**
* Timestream Table Magentic Store Write properties.
*
* The option is a: {@code
* software.amazon.awssdk.services.timestreamwrite.model.MagneticStoreWriteProperties} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamMagneticStoreWriteProperties}.
*/
public String awsTimestreamMagneticStoreWriteProperties() {
return "CamelAwsTimestreamMagneticStoreWriteProperties";
}
/**
* Name of Time column.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamTimeColumn}.
*/
public String awsTimestreamTimeColumn() {
return "CamelAwsTimestreamTimeColumn";
}
/**
* Name of the measure column.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamMeasureColumnName}.
*/
public String awsTimestreamMeasureColumnName() {
return "CamelAwsTimestreamMeasureColumnName";
}
/**
* This is to allow mapping column(s) from the query result to the
* dimension in the destination table.
*
* The option is a: {@code List} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamDimensionMappingList}.
*/
public String awsTimestreamDimensionMappingList() {
return "CamelAwsTimestreamDimensionMappingList";
}
/**
* Multi-measure mappings.
*
* The option is a: {@code
* software.amazon.awssdk.services.timestreamquery.model.MultiMeasureMappings} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamMultiMeasureMappings}.
*/
public String awsTimestreamMultiMeasureMappings() {
return "CamelAwsTimestreamMultiMeasureMappings";
}
/**
* Specifies how to map measures to multi-measure records.
*
* The option is a: {@code List} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamMixedMeasureMappingList}.
*/
public String awsTimestreamMixedMeasureMappingList() {
return "CamelAwsTimestreamMixedMeasureMappingList";
}
/**
* Name of scheduled query.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamScheduledQueryName}.
*/
public String awsTimestreamScheduledQueryName() {
return "CamelAwsTimestreamScheduledQueryName";
}
/**
* Arn of scheduled query.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamScheduledQueryArn}.
*/
public String awsTimestreamScheduledQueryArn() {
return "CamelAwsTimestreamScheduledQueryArn";
}
/**
* State of scheduled query.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamScheduledQueryState}.
*/
public String awsTimestreamScheduledQueryState() {
return "CamelAwsTimestreamScheduledQueryState";
}
/**
* Invocation Time for scheduled query execution.
*
* The option is a: {@code Instant} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamScheduledQueryInvocationTime}.
*/
public String awsTimestreamScheduledQueryInvocationTime() {
return "CamelAwsTimestreamScheduledQueryInvocationTime";
}
/**
* The query string to run.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamQueryString}.
*/
public String awsTimestreamQueryString() {
return "CamelAwsTimestreamQueryString";
}
/**
* ID of query.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamQueryId}.
*/
public String awsTimestreamQueryId() {
return "CamelAwsTimestreamQueryId";
}
/**
* Validates the prepared query, but does not store for later execution.
*
* The option is a: {@code Boolean} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamQueryValidateOnly}.
*/
public String awsTimestreamQueryValidateOnly() {
return "CamelAwsTimestreamQueryValidateOnly";
}
/**
* The total number of rows to be returned in the Query output.
*
* The option is a: {@code Integer} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamQueryMaxRows}.
*/
public String awsTimestreamQueryMaxRows() {
return "CamelAwsTimestreamQueryMaxRows";
}
/**
* Max Results to be returned in output.
*
* The option is a: {@code Integer} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamMaxResults}.
*/
public String awsTimestreamMaxResults() {
return "CamelAwsTimestreamMaxResults";
}
/**
* The schedule expression for the query.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamScheduleExpression}.
*/
public String awsTimestreamScheduleExpression() {
return "CamelAwsTimestreamScheduleExpression";
}
/**
* Notification Topic Arn for the scheduled query.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamNotificationTopicArn}.
*/
public String awsTimestreamNotificationTopicArn() {
return "CamelAwsTimestreamNotificationTopicArn";
}
/**
* S3 Bucket name for error reporting.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamErrorReportS3BucketName}.
*/
public String awsTimestreamErrorReportS3BucketName() {
return "CamelAwsTimestreamErrorReportS3BucketName";
}
/**
* S3 object key prefix for error reporting.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamErrorReportS3ObjectKeyPrefix}.
*/
public String awsTimestreamErrorReportS3ObjectKeyPrefix() {
return "CamelAwsTimestreamErrorReportS3ObjectKeyPrefix";
}
/**
* S3 encryption option for error reporting.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamErrorReportS3EncryptionOption}.
*/
public String awsTimestreamErrorReportS3EncryptionOption() {
return "CamelAwsTimestreamErrorReportS3EncryptionOption";
}
/**
* he ARN for the IAM role that Timestream will assume when running the
* scheduled query.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* AwsTimestreamScheduledQueryExecutionRoleArn}.
*/
public String awsTimestreamScheduledQueryExecutionRoleArn() {
return "CamelAwsTimestreamScheduledQueryExecutionRoleArn";
}
/**
* Using a ClientToken makes the call to CreateScheduledQuery
* idempotent.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamClientToken}.
*/
public String awsTimestreamClientToken() {
return "CamelAwsTimestreamClientToken";
}
/**
* The Amazon KMS key used to encrypt the scheduled query resource,
* at-rest.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code AwsTimestreamKmsKeyId}.
*/
public String awsTimestreamKmsKeyId() {
return "CamelAwsTimestreamKmsKeyId";
}
}
static Timestream2EndpointBuilder endpointBuilder(String componentName, String path) {
| Timestream2HeaderNameBuilder |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/DefaultListableBeanFactoryTests.java | {
"start": 139639,
"end": 139819
} | class ____<T extends Repository<S, ID>, S, ID extends Serializable>
implements RepositoryFactoryInformation<S, ID>, FactoryBean<T> {
}
public static | RepositoryFactoryBeanSupport |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/configuration/ConfigurationMetadataWriter.java | {
"start": 965,
"end": 1173
} | interface ____ classes that can write metadata produced by a {@link ConfigurationMetadataBuilder}.
*
* @param metadataBuilder The metadata builder
* @param classWriterOutputVisitor The | for |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java | {
"start": 6007,
"end": 8570
} | class ____ extends ByteToMessageDecoder {
public static final int DEFAULT_MAX_INITIAL_LINE_LENGTH = 4096;
public static final int DEFAULT_MAX_HEADER_SIZE = 8192;
public static final boolean DEFAULT_CHUNKED_SUPPORTED = true;
public static final boolean DEFAULT_ALLOW_PARTIAL_CHUNKS = true;
public static final int DEFAULT_MAX_CHUNK_SIZE = 8192;
public static final boolean DEFAULT_VALIDATE_HEADERS = true;
public static final int DEFAULT_INITIAL_BUFFER_SIZE = 128;
public static final boolean DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS = false;
public static final boolean DEFAULT_STRICT_LINE_PARSING =
SystemPropertyUtil.getBoolean("io.netty.handler.codec.http.defaultStrictLineParsing", true);
private static final Runnable THROW_INVALID_CHUNK_EXTENSION = new Runnable() {
@Override
public void run() {
throw new InvalidChunkExtensionException();
}
};
private static final Runnable THROW_INVALID_LINE_SEPARATOR = new Runnable() {
@Override
public void run() {
throw new InvalidLineSeparatorException();
}
};
private final int maxChunkSize;
private final boolean chunkedSupported;
private final boolean allowPartialChunks;
/**
* This field is no longer used. It is only kept around for backwards compatibility purpose.
*/
@Deprecated
protected final boolean validateHeaders;
protected final HttpHeadersFactory headersFactory;
protected final HttpHeadersFactory trailersFactory;
private final boolean allowDuplicateContentLengths;
private final ByteBuf parserScratchBuffer;
private final Runnable defaultStrictCRLFCheck;
private final HeaderParser headerParser;
private final LineParser lineParser;
private HttpMessage message;
private long chunkSize;
private long contentLength = Long.MIN_VALUE;
private boolean chunked;
private boolean isSwitchingToNonHttp1Protocol;
private final AtomicBoolean resetRequested = new AtomicBoolean();
// These will be updated by splitHeader(...)
private AsciiString name;
private String value;
private LastHttpContent trailer;
@Override
protected void handlerRemoved0(ChannelHandlerContext ctx) throws Exception {
try {
parserScratchBuffer.release();
} finally {
super.handlerRemoved0(ctx);
}
}
/**
* The internal state of {@link HttpObjectDecoder}.
* <em>Internal use only</em>.
*/
private | HttpObjectDecoder |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/type/MapLikeType.java | {
"start": 534,
"end": 8663
} | class ____ extends TypeBase {
private static final long serialVersionUID = 1L;
/**
* Type of keys of Map.
*/
protected final JavaType _keyType;
/**
* Type of values of Map.
*/
protected final JavaType _valueType;
/*
/**********************************************************
* Life-cycle
/**********************************************************
*/
protected MapLikeType(Class<?> mapType, TypeBindings bindings,
JavaType superClass, JavaType[] superInts, JavaType keyT,
JavaType valueT, Object valueHandler, Object typeHandler,
boolean asStatic) {
super(mapType, bindings, superClass, superInts,
31 * keyT.hashCode() + valueT.hashCode(),
valueHandler, typeHandler, asStatic);
_keyType = keyT;
_valueType = valueT;
}
/**
* @since 2.7
*/
protected MapLikeType(TypeBase base, JavaType keyT, JavaType valueT) {
super(base);
_keyType = keyT;
_valueType = valueT;
}
/**
* Factory method that can be used to "upgrade" a basic type into
* collection-like one; usually done via {@link TypeModifier}
*/
public static MapLikeType upgradeFrom(JavaType baseType, JavaType keyT,
JavaType valueT) {
// 19-Oct-2015, tatu: Not sure if and how other types could be used as
// base;
// will cross that bridge if and when need be
if (baseType instanceof TypeBase base) {
return new MapLikeType(base, keyT, valueT);
}
throw new IllegalArgumentException(
"Cannot upgrade from an instance of " + baseType.getClass());
}
public MapLikeType withKeyType(JavaType keyType) {
if (keyType == _keyType) {
return this;
}
return new MapLikeType(_class, _bindings, _superClass,
_superInterfaces, keyType, _valueType, _valueHandler,
_typeHandler, _asStatic);
}
@Override
public JavaType withContentType(JavaType contentType) {
if (_valueType == contentType) {
return this;
}
return new MapLikeType(_class, _bindings, _superClass,
_superInterfaces, _keyType, contentType, _valueHandler,
_typeHandler, _asStatic);
}
@Override
public MapLikeType withTypeHandler(Object h) {
return new MapLikeType(_class, _bindings, _superClass,
_superInterfaces, _keyType, _valueType, _valueHandler, h,
_asStatic);
}
@Override
public MapLikeType withContentTypeHandler(Object h) {
return new MapLikeType(_class, _bindings, _superClass,
_superInterfaces, _keyType, _valueType.withTypeHandler(h),
_valueHandler, _typeHandler, _asStatic);
}
@Override
public MapLikeType withValueHandler(Object h) {
return new MapLikeType(_class, _bindings, _superClass,
_superInterfaces, _keyType, _valueType, h, _typeHandler,
_asStatic);
}
@Override
public MapLikeType withContentValueHandler(Object h) {
return new MapLikeType(_class, _bindings, _superClass,
_superInterfaces, _keyType, _valueType.withValueHandler(h),
_valueHandler, _typeHandler, _asStatic);
}
@Override
public JavaType withHandlersFrom(JavaType src) {
JavaType type = super.withHandlersFrom(src);
JavaType srcKeyType = src.getKeyType();
// "withKeyType()" not part of JavaType, hence must verify:
if (type instanceof MapLikeType mapLikeType) {
if (srcKeyType != null) {
JavaType ct = _keyType.withHandlersFrom(srcKeyType);
if (ct != _keyType) {
type = mapLikeType.withKeyType(ct);
}
}
}
JavaType srcCt = src.getContentType();
if (srcCt != null) {
JavaType ct = _valueType.withHandlersFrom(srcCt);
if (ct != _valueType) {
type = type.withContentType(ct);
}
}
return type;
}
@Override
public MapLikeType withStaticTyping() {
if (_asStatic) {
return this;
}
return new MapLikeType(_class, _bindings, _superClass,
_superInterfaces, _keyType, _valueType.withStaticTyping(),
_valueHandler, _typeHandler, true);
}
@Override
public JavaType refine(Class<?> rawType, TypeBindings bindings,
JavaType superClass, JavaType[] superInterfaces) {
return new MapLikeType(rawType, bindings, superClass, superInterfaces,
_keyType, _valueType, _valueHandler, _typeHandler, _asStatic);
}
@Override
protected String buildCanonicalName() {
StringBuilder sb = new StringBuilder();
sb.append(_class.getName());
// 10-Apr-2021, tatu: [databind#3108] Ensure we have at least nominally
// compatible type declaration (weak guarantee but better than nothing)
if ((_keyType != null) && _hasNTypeParameters(2)) {
sb.append('<');
sb.append(_keyType.toCanonical());
sb.append(',');
sb.append(_valueType.toCanonical());
sb.append('>');
}
return sb.toString();
}
/*
/**********************************************************
/* Public API
/**********************************************************
*/
@Override
public boolean isContainerType() {
return true;
}
@Override
public boolean isMapLikeType() {
return true;
}
@Override
public JavaType getKeyType() {
return _keyType;
}
@Override
public JavaType getContentType() {
return _valueType;
}
@Override
public Object getContentValueHandler() {
return _valueType.getValueHandler();
}
@Override
public Object getContentTypeHandler() {
return _valueType.getTypeHandler();
}
@Override
public boolean hasHandlers() {
return super.hasHandlers() || _valueType.hasHandlers()
|| _keyType.hasHandlers();
}
@Override
public StringBuilder getErasedSignature(StringBuilder sb) {
return _classSignature(_class, sb, true);
}
@Override
public StringBuilder getGenericSignature(StringBuilder sb) {
_classSignature(_class, sb, false);
sb.append('<');
_keyType.getGenericSignature(sb);
_valueType.getGenericSignature(sb);
sb.append(">;");
return sb;
}
/*
/**********************************************************
/* Extended API
/**********************************************************
*/
public MapLikeType withKeyTypeHandler(Object h) {
return new MapLikeType(_class, _bindings, _superClass,
_superInterfaces, _keyType.withTypeHandler(h), _valueType,
_valueHandler, _typeHandler, _asStatic);
}
public MapLikeType withKeyValueHandler(Object h) {
return new MapLikeType(_class, _bindings, _superClass,
_superInterfaces, _keyType.withValueHandler(h), _valueType,
_valueHandler, _typeHandler, _asStatic);
}
/*
/**********************************************************************
/* Standard methods
/**********************************************************************
*/
@Override
public String toString() {
return String.format("[map-like type; class %s, %s -> %s]",
_class.getName(), _keyType, _valueType);
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if (o == null) return false;
if (o.getClass() != getClass()) return false;
MapLikeType other = (MapLikeType) o;
return (_class == other._class) && _keyType.equals(other._keyType)
&& _valueType.equals(other._valueType);
}
}
| MapLikeType |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/AbstractHeapPriorityQueue.java | {
"start": 1510,
"end": 6025
} | class ____<T extends HeapPriorityQueueElement>
implements InternalPriorityQueue<T> {
/** The array that represents the heap-organized priority queue. */
@Nonnull protected T[] queue;
/** The current size of the priority queue. */
@Nonnegative protected int size;
@SuppressWarnings("unchecked")
public AbstractHeapPriorityQueue(@Nonnegative int minimumCapacity) {
this.queue = (T[]) new HeapPriorityQueueElement[getHeadElementIndex() + minimumCapacity];
this.size = 0;
}
@Override
@Nullable
public T poll() {
return size() > 0 ? removeInternal(getHeadElementIndex()) : null;
}
@Override
@Nullable
public T peek() {
// References to removed elements are expected to become set to null.
return queue[getHeadElementIndex()];
}
@Override
public boolean add(@Nonnull T toAdd) {
addInternal(toAdd);
return toAdd.getInternalIndex() == getHeadElementIndex();
}
@Override
public boolean remove(@Nonnull T toRemove) {
final int elementIndex = toRemove.getInternalIndex();
removeInternal(elementIndex);
return elementIndex == getHeadElementIndex();
}
@Override
public boolean isEmpty() {
return size() == 0;
}
@Override
public int size() {
return size;
}
@Override
public void addAll(@Nullable Collection<? extends T> toAdd) {
if (toAdd == null) {
return;
}
resizeForBulkLoad(toAdd.size());
for (T element : toAdd) {
add(element);
}
}
@SuppressWarnings({"unchecked"})
@Nonnull
public <O> O[] toArray(O[] out) {
final int heapArrayOffset = getHeadElementIndex();
if (out.length < size) {
return (O[])
Arrays.copyOfRange(
queue, heapArrayOffset, heapArrayOffset + size, out.getClass());
} else {
System.arraycopy(queue, heapArrayOffset, out, 0, size);
if (out.length > size) {
out[size] = null;
}
return out;
}
}
/**
* Returns an iterator over the elements in this queue. The iterator does not return the
* elements in any particular order.
*
* @return an iterator over the elements in this queue.
*/
@Nonnull
@Override
public CloseableIterator<T> iterator() {
return new HeapIterator();
}
/** Clears the queue. */
public void clear() {
final int arrayOffset = getHeadElementIndex();
Arrays.fill(queue, arrayOffset, arrayOffset + size, null);
size = 0;
}
protected void resizeForBulkLoad(int totalSize) {
if (totalSize > queue.length) {
int desiredSize = totalSize + (totalSize >>> 3);
resizeQueueArray(desiredSize, totalSize);
}
}
protected void resizeQueueArray(int desiredSize, int minRequiredSize) {
if (isValidArraySize(desiredSize)) {
queue = Arrays.copyOf(queue, desiredSize);
} else if (isValidArraySize(minRequiredSize)) {
queue = Arrays.copyOf(queue, MAX_ARRAY_SIZE);
} else {
throw new OutOfMemoryError(
"Required minimum heap size "
+ minRequiredSize
+ " exceeds maximum size of "
+ MAX_ARRAY_SIZE
+ ".");
}
}
protected void moveElementToIdx(T element, int idx) {
queue[idx] = element;
element.setInternalIndex(idx);
}
/**
* Implements how to remove the element at the given index from the queue.
*
* @param elementIndex the index to remove.
* @return the removed element.
*/
protected abstract T removeInternal(@Nonnegative int elementIndex);
/**
* Implements how to add an element to the queue.
*
* @param toAdd the element to add.
*/
protected abstract void addInternal(@Nonnull T toAdd);
/** Returns the start index of the queue elements in the array. */
protected abstract int getHeadElementIndex();
private static boolean isValidArraySize(int size) {
return size >= 0 && size <= MAX_ARRAY_SIZE;
}
/**
* {@link Iterator} implementation for {@link HeapPriorityQueue}. {@link Iterator#remove()} is
* not supported.
*/
private final | AbstractHeapPriorityQueue |
java | spring-projects__spring-framework | spring-expression/src/main/java/org/springframework/expression/spel/ast/ValueRef.java | {
"start": 1418,
"end": 2012
} | interface ____ {
/**
* Returns the value this ValueRef points to, it should not require expression
* component re-evaluation.
* @return the value
*/
TypedValue getValue();
/**
* Sets the value this ValueRef points to, it should not require expression component
* re-evaluation.
* @param newValue the new value
*/
void setValue(@Nullable Object newValue);
/**
* Indicates whether calling setValue(Object) is supported.
* @return true if setValue() is supported for this value reference.
*/
boolean isWritable();
/**
* A ValueRef for the null value.
*/
| ValueRef |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestHdfsManifestToResourcesPlugin.java | {
"start": 2191,
"end": 6883
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestHdfsManifestToResourcesPlugin.class);
private Configuration conf;
private String tmpPath = new StringBuilder(
System.getProperty("test.build.data"))
.append('/').append("hadoop.tmp.dir").toString();
private static final String LAYER_MEDIA_TYPE =
"application/vnd.docker.image.rootfs.diff.tar.gzip";
private static final String CONFIG_MEDIA_TYPE =
"application/vnd.docker.container.image.v1+json";
@BeforeEach
public void setup() {
conf = new Configuration();
File tmpDir = new File(tmpPath);
tmpDir.mkdirs();
}
@AfterEach
public void cleanUp() throws IOException {
File tmpDir = new File(tmpPath);
FileUtils.deleteDirectory(tmpDir);
}
@Test
public void testGetLayerResources() throws IOException {
ImageManifest mockManifest = mock(ImageManifest.class);
ImageManifest.Blob mockLayer1 = mock(ImageManifest.Blob.class);
ImageManifest.Blob mockLayer2 = mock(ImageManifest.Blob.class);
String digest1Hash =
"e060f9dd9e8cd9ec0e2814b661a96d78f7298120d7654ba9f83ebfb11ff1fb1e";
String digest2Hash =
"5af5ff88469c8473487bfbc2fe81b4e7d84644bd91f1ab9305de47ef5673637e";
String digest1 =
"sha256:" + digest1Hash;
String digest2 =
"sha256:" + digest2Hash;
long size1 = 1234;
long size2 = 5678;
when(mockLayer1.getMediaType()).thenReturn(LAYER_MEDIA_TYPE);
when(mockLayer1.getDigest()).thenReturn(digest1);
when(mockLayer1.getSize()).thenReturn(size1);
when(mockLayer2.getMediaType()).thenReturn(LAYER_MEDIA_TYPE);
when(mockLayer2.getDigest()).thenReturn(digest2);
when(mockLayer2.getSize()).thenReturn(size2);
ArrayList<ImageManifest.Blob> mockLayers = new ArrayList<>();
mockLayers.add(mockLayer1);
mockLayers.add(mockLayer2);
when(mockManifest.getLayers()).thenReturn(mockLayers);
conf.set(NM_RUNC_IMAGE_TOPLEVEL_DIR, tmpPath);
long modTime = 123456789;
HdfsManifestToResourcesPlugin hdfsManifestToResourcesPlugin =
new HdfsManifestToResourcesPlugin() {
@Override
protected FileStatus statBlob(Path path) throws IOException {
FileStatus mockFileStatus = mock(FileStatus.class);
when(mockFileStatus.getModificationTime()).thenReturn(modTime);
return mockFileStatus;
}
};
hdfsManifestToResourcesPlugin.init(conf);
List<LocalResource> returnedLayers =
hdfsManifestToResourcesPlugin.getLayerResources(mockManifest);
URL url1 = URL.fromPath(new Path(tmpPath + "/layers",
digest1Hash + ".sqsh"));
URL url2 = URL.fromPath(new Path(tmpPath + "/layers",
digest2Hash + ".sqsh"));
LocalResource rsrc1 = LocalResource.newInstance(url1,
LocalResourceType.FILE, LocalResourceVisibility.PUBLIC,
size1, modTime);
LocalResource rsrc2 = LocalResource.newInstance(url2,
LocalResourceType.FILE, LocalResourceVisibility.PUBLIC,
size2, modTime);
assertEquals(rsrc1, returnedLayers.get(0));
assertEquals(rsrc2, returnedLayers.get(1));
}
@Test
public void testGetConfigResources() throws IOException {
ImageManifest mockManifest = mock(ImageManifest.class);
ImageManifest.Blob mockConfig = mock(ImageManifest.Blob.class);
String digestHash =
"e23cac476d0238f0f859c1e07e5faad85262bca490ef5c3a9da32a5b39c6b204";
String digest =
"sha256:" + digestHash;
long size = 1234;
when(mockConfig.getMediaType()).thenReturn(CONFIG_MEDIA_TYPE);
when(mockConfig.getDigest()).thenReturn(digest);
when(mockConfig.getSize()).thenReturn(size);
when(mockManifest.getConfig()).thenReturn(mockConfig);
conf.set(NM_RUNC_IMAGE_TOPLEVEL_DIR, tmpPath);
long modTime = 123456789;
HdfsManifestToResourcesPlugin hdfsManifestToResourcesPlugin =
new HdfsManifestToResourcesPlugin() {
@Override
protected FileStatus statBlob(Path path) throws IOException {
FileStatus mockFileStatus = mock(FileStatus.class);
when(mockFileStatus.getModificationTime()).thenReturn(modTime);
return mockFileStatus;
}
};
hdfsManifestToResourcesPlugin.init(conf);
LocalResource returnedLayer =
hdfsManifestToResourcesPlugin.getConfigResource(mockManifest);
URL url1 = URL.fromPath(new Path(tmpPath + "/config", digestHash));
LocalResource rsrc = LocalResource.newInstance(url1,
LocalResourceType.FILE, LocalResourceVisibility.PUBLIC,
size, modTime);
assertEquals(rsrc, returnedLayer);
}
}
| TestHdfsManifestToResourcesPlugin |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/casting/RowDataToStringConverterImpl.java | {
"start": 1591,
"end": 3957
} | class ____ implements RowDataToStringConverter {
private final DataType dataType;
private final CastRule.Context castRuleContext;
private Function<RowData, String>[] columnConverters;
public RowDataToStringConverterImpl(
DataType dataType,
ZoneId zoneId,
ClassLoader classLoader,
boolean legacyBehaviour,
CodeGeneratorContext codeGeneratorContext) {
this.dataType = dataType;
this.castRuleContext =
CastRule.Context.create(
true, legacyBehaviour, zoneId, classLoader, codeGeneratorContext);
}
@SuppressWarnings("unchecked")
private void init() {
List<DataType> rowDataTypes = DataType.getFieldDataTypes(dataType);
this.columnConverters = new Function[rowDataTypes.size()];
for (int i = 0; i < rowDataTypes.size(); i++) {
final int index = i;
LogicalType fieldType = rowDataTypes.get(index).getLogicalType();
RowData.FieldGetter getter = RowData.createFieldGetter(fieldType, index);
CastExecutor<Object, StringData> castExecutor =
(CastExecutor<Object, StringData>)
CastRuleProvider.create(
castRuleContext, fieldType, VarCharType.STRING_TYPE);
if (castExecutor == null) {
throw new IllegalStateException(
"Cannot create a cast executor for converting "
+ fieldType
+ " to string. This is a bug, please open an issue.");
}
this.columnConverters[index] =
row -> {
if (row.isNullAt(index)) {
return PrintStyle.NULL_VALUE;
}
return castExecutor.cast(getter.getFieldOrNull(row)).toString();
};
}
}
@Override
public String[] convert(RowData rowData) {
if (this.columnConverters == null) {
init();
}
String[] result = new String[rowData.getArity()];
for (int i = 0; i < result.length; i++) {
result[i] = this.columnConverters[i].apply(rowData);
}
return result;
}
}
| RowDataToStringConverterImpl |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/injection/unsatisfied/UnsatisfiedMatchVetoedTypeTest.java | {
"start": 1177,
"end": 1281
} | class ____ {
@Inject
FooService foo;
}
@Vetoed
@Singleton
static | Consumer |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java | {
"start": 36402,
"end": 36808
} | class ____ extends DrainDispatcher {
int eventreceived = 0;
@SuppressWarnings("rawtypes")
@Override
protected void dispatch(Event event) {
if (event.getType() == RMFatalEventType.TRANSITION_TO_ACTIVE_FAILED) {
eventreceived++;
} else {
super.dispatch(event);
}
}
public int getEventCount() {
return eventreceived;
}
}
}
| FailFastDispatcher |
java | alibaba__nacos | config/src/test/java/com/alibaba/nacos/config/server/aspect/ConfigChangeAspectTest.java | {
"start": 2604,
"end": 12271
} | class ____ {
ConfigChangeAspect configChangeAspect;
ConfigChangeConfigs configChangeConfigs;
@Mock
ConfigChangePluginService configChangePluginService;
MockedStatic<PropertiesUtil> propertiesStatic;
MockedStatic<RequestUtil> requestUtilMockedStatic;
@Mock
private ProceedingJoinPoint pjp;
@Mock
private ConfigForm configForm;
@Mock
private ConfigRequestInfo configRequestInfo;
@BeforeEach
void before() {
//mock config change service enabled.
propertiesStatic = Mockito.mockStatic(PropertiesUtil.class);
requestUtilMockedStatic = Mockito.mockStatic(RequestUtil.class);
Properties properties = new Properties();
properties.put("mockedConfigChangeService.enabled", "true");
propertiesStatic.when(() -> PropertiesUtil.getPropertiesWithPrefix(any(),
eq(ConfigChangeConstants.NACOS_CORE_CONFIG_PLUGIN_PREFIX))).thenReturn(properties);
requestUtilMockedStatic.when(() -> RequestUtil.getSrcUserName(any(HttpServletRequest.class)))
.thenReturn("mockedUser");
Mockito.when(configChangePluginService.getServiceType()).thenReturn("mockedConfigChangeService");
Mockito.when(configChangePluginService.pointcutMethodNames()).thenReturn(ConfigChangePointCutTypes.values());
Mockito.when(configChangePluginService.executeType()).thenReturn(ConfigChangeExecuteTypes.EXECUTE_AFTER_TYPE);
ConfigChangePluginManager.join(configChangePluginService);
configChangeConfigs = new ConfigChangeConfigs();
configChangeAspect = new ConfigChangeAspect(configChangeConfigs);
}
@AfterEach
void after() {
propertiesStatic.close();
requestUtilMockedStatic.close();
ConfigChangePluginManager.reset();
}
@Test
void testPublishOrUpdateConfigAround() throws Throwable {
Mockito.when(configChangePluginService.executeType()).thenReturn(ConfigChangeExecuteTypes.EXECUTE_AFTER_TYPE);
when(pjp.getArgs()).thenReturn(new Object[] {configForm, configRequestInfo});
when(configForm.getDataId()).thenReturn("dataId");
when(configForm.getGroup()).thenReturn("group");
when(configForm.getNamespaceId()).thenReturn("namespaceId");
when(configForm.getContent()).thenReturn("content");
when(configRequestInfo.getSrcIp()).thenReturn("127.0.0.1");
when(configRequestInfo.getSrcType()).thenReturn("http");
when(pjp.proceed(any())).thenReturn("Success");
Object o = configChangeAspect.publishOrUpdateConfigAround(pjp);
Thread.sleep(20L);
// expect service executed.
verify(configChangePluginService, Mockito.times(1))
.execute(any(ConfigChangeRequest.class), any(ConfigChangeResponse.class));
//expect join point processed success.
assertEquals("Success", o);
}
@Test
void testRemoveConfigByIdAround() throws Throwable {
Mockito.when(configChangePluginService.executeType()).thenReturn(ConfigChangeExecuteTypes.EXECUTE_AFTER_TYPE);
String dataId = "dataId1";
String group = "group1";
String namespaceId = "namespaceId1";
String tag = "tag1";
String clientIp = "127.0.0.1";
String srcUser = "mockedUser";
String srcType = "http";
when(pjp.getArgs()).thenReturn(new Object[] {dataId, group, namespaceId, tag, clientIp, srcUser, srcType});
Mockito.when(pjp.proceed(any())).thenReturn("mock success return");
Object o = configChangeAspect.removeConfigByIdAround(pjp);
Thread.sleep(20L);
// expect service executed.
verify(configChangePluginService, Mockito.times(1))
.execute(any(ConfigChangeRequest.class), any(ConfigChangeResponse.class));
//expect join point processed success.
assertEquals("mock success return", o);
}
@Test
void testDisEnablePluginService() throws Throwable {
Properties properties = new Properties();
properties.put("mockedConfigChangeService.enabled", "false");
String dataId = "dataId1";
String group = "group1";
String namespaceId = "namespaceId1";
String tag = "tag1";
String clientIp = "127.0.0.1";
String srcUser = "mockedUser";
String srcType = "http";
when(pjp.getArgs()).thenReturn(new Object[] {dataId, group, namespaceId, tag, clientIp, srcUser, srcType});
propertiesStatic.when(() -> PropertiesUtil.getPropertiesWithPrefix(any(),
eq(ConfigChangeConstants.NACOS_CORE_CONFIG_PLUGIN_PREFIX))).thenReturn(properties);
configChangeConfigs.onEvent(ServerConfigChangeEvent.newEvent());
assertFalse(Boolean.parseBoolean(
configChangeConfigs.getPluginProperties("mockedConfigChangeService").getProperty("enabled")));
Mockito.when(configChangePluginService.executeType()).thenReturn(ConfigChangeExecuteTypes.EXECUTE_BEFORE_TYPE);
Mockito.when(configChangePluginService.getServiceType()).thenReturn("mockedConfigChangeService");
ConfigPublishResponse configPublishResponse = ConfigPublishResponse.buildSuccessResponse();
Mockito.when(pjp.proceed()).thenReturn(configPublishResponse);
//execute
Object o = configChangeAspect.removeConfigByIdAround(pjp);
//expect
verify(configChangePluginService, Mockito.times(0))
.execute(any(ConfigChangeRequest.class), any(ConfigChangeResponse.class));
assertEquals(configPublishResponse, o);
}
@Test
void testBeforePluginFailurePreventsProceed() throws Throwable {
Mockito.when(configChangePluginService.executeType()).thenReturn(ConfigChangeExecuteTypes.EXECUTE_BEFORE_TYPE);
Mockito.doAnswer(invocation -> {
ConfigChangeResponse response = invocation.getArgument(1);
response.setSuccess(false);
response.setMsg("Before plugin failed");
return null;
}).when(configChangePluginService).execute(any(), any());
when(pjp.getArgs()).thenReturn(new Object[]{configForm, configRequestInfo});
when(configForm.getDataId()).thenReturn("dataId");
when(configRequestInfo.getSrcType()).thenReturn("http");
Object result = configChangeAspect.publishOrUpdateConfigAround(pjp);
verify(pjp, never()).proceed();
assertEquals(false, result);
verify(configChangePluginService).execute(any(), any());
}
@Test
void testProceedThrowsExceptionHandled() throws Throwable {
when(pjp.getArgs()).thenReturn(new Object[]{configForm, configRequestInfo});
when(configForm.getDataId()).thenReturn("dataId");
when(configRequestInfo.getSrcType()).thenReturn("http");
when(pjp.proceed(any())).thenThrow(new RuntimeException("Proceed error"));
Object result = configChangeAspect.publishOrUpdateConfigAround(pjp);
assertEquals(false, result);
}
@Test
void testAfterPluginExecutedAsynchronously() throws Throwable {
CountDownLatch latch = new CountDownLatch(1);
Mockito.when(configChangePluginService.executeType()).thenReturn(ConfigChangeExecuteTypes.EXECUTE_AFTER_TYPE);
Mockito.doAnswer(invocation -> {
latch.countDown();
return null;
}).when(configChangePluginService).execute(any(), any());
when(pjp.getArgs()).thenReturn(new Object[]{configForm, configRequestInfo});
when(configForm.getDataId()).thenReturn("dataId");
when(configRequestInfo.getSrcType()).thenReturn("http");
when(pjp.proceed()).thenReturn("Success");
Object result = configChangeAspect.publishOrUpdateConfigAround(pjp);
assertTrue(latch.await(500, TimeUnit.MILLISECONDS));
verify(configChangePluginService).execute(any(), any());
assertEquals(null, result);
}
@Test
void testRpcSourceTypeHandling() throws Throwable {
when(pjp.getArgs()).thenReturn(new Object[]{configForm, configRequestInfo});
when(configRequestInfo.getSrcType()).thenReturn("rpc");
when(configForm.getDataId()).thenReturn("dataId");
when(pjp.proceed()).thenReturn("Success");
configChangeAspect.publishOrUpdateConfigAround(pjp);
ArgumentCaptor<ConfigChangeRequest> requestCaptor = ArgumentCaptor.forClass(ConfigChangeRequest.class);
verify(configChangePluginService).execute(requestCaptor.capture(), any());
assertEquals(ConfigChangePointCutTypes.PUBLISH_BY_RPC, requestCaptor.getValue().getRequestType());
}
@Test
void testNoPluginsEnabled() throws Throwable {
Properties properties = new Properties();
properties.put("mockedConfigChangeService.enabled", "false");
propertiesStatic.when(() -> PropertiesUtil.getPropertiesWithPrefix(any(), any())).thenReturn(properties);
configChangeConfigs.onEvent(ServerConfigChangeEvent.newEvent());
when(pjp.getArgs()).thenReturn(new Object[]{configForm, configRequestInfo});
when(configRequestInfo.getSrcType()).thenReturn("http");
when(pjp.proceed()).thenReturn("Success");
Object result = configChangeAspect.publishOrUpdateConfigAround(pjp);
verify(configChangePluginService, never()).execute(any(), any());
assertEquals("Success", result);
}
}
| ConfigChangeAspectTest |
java | apache__flink | flink-datastream/src/main/java/org/apache/flink/datastream/impl/watermark/DefaultWatermarkManager.java | {
"start": 1315,
"end": 2372
} | class ____ implements WatermarkManager {
private final Output<?> streamRecordOutput;
private final Map<String, AbstractInternalWatermarkDeclaration<?>> watermarkDeclarationMap;
public DefaultWatermarkManager(
Output<?> streamRecordOutput,
Map<String, AbstractInternalWatermarkDeclaration<?>> watermarkDeclarationMap) {
this.streamRecordOutput = streamRecordOutput;
this.watermarkDeclarationMap = watermarkDeclarationMap;
}
@Override
public void emitWatermark(Watermark watermark) {
Preconditions.checkState(
watermarkDeclarationMap.containsKey(watermark.getIdentifier()),
"Watermark identifier "
+ watermark.getIdentifier()
+ " does not exist, please declare it.");
streamRecordOutput.emitWatermark(
new WatermarkEvent(
watermark,
watermarkDeclarationMap.get(watermark.getIdentifier()).isAligned()));
}
}
| DefaultWatermarkManager |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/joinfetch/JoinFetchNestedAssociationsTest.java | {
"start": 3518,
"end": 3628
} | class ____ extends BaseBEntity {
}
@Entity( name = "B2Entity" )
@Table( name = "b_entities" )
static | B1Entity |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.