language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/MetricsDataStreamIT.java | {
"start": 690,
"end": 3808
} | class ____ extends AbstractDataStreamIT {
@SuppressWarnings("unchecked")
public void testCustomMapping() throws Exception {
{
Request request = new Request("POST", "/_component_template/metrics@custom");
request.setJsonEntity("""
{
"template": {
"settings": {
"index": {
"query": {
"default_field": ["custom-message"]
}
}
},
"mappings": {
"properties": {
"numeric_field": {
"type": "integer"
},
"socket": {
"properties": {
"ip": {
"type": "keyword"
}
}
}
}
}
}
}
""");
assertOK(client.performRequest(request));
}
String dataStreamName = "metrics-generic-default";
createDataStream(client, dataStreamName);
String backingIndex = getWriteBackingIndex(client, dataStreamName);
// Verify that the custom settings.index.query.default_field overrides the default query field - "message"
Map<String, Object> settings = getSettings(client, backingIndex);
assertThat(settings.get("index.query.default_field"), is(List.of("custom-message")));
// Verify that the new field from the custom component template is applied
putMapping(client, backingIndex);
Map<String, Object> mappingProperties = getMappingProperties(client, backingIndex);
assertThat(getValueFromPath(mappingProperties, List.of("numeric_field", "type")), equalTo("integer"));
assertThat(getValueFromPath(mappingProperties, List.of("socket", "properties", "ip", "type")), is("keyword"));
// Insert valid doc and verify successful indexing
{
indexDoc(client, dataStreamName, """
{
"@timestamp": "2024-06-10",
"test": "doc-with-ip",
"socket": {
"ip": "127.0.0.1"
}
}
""");
List<Object> results = searchDocs(client, dataStreamName, """
{
"query": {
"term": {
"test": {
"value": "doc-with-ip"
}
}
},
"fields": ["socket.ip"]
}
""");
Map<String, Object> fields = ((Map<String, Map<String, Object>>) results.get(0)).get("_source");
assertThat(fields.get("socket"), is(Map.of("ip", "127.0.0.1")));
}
}
@Override
protected String indexTemplateName() {
return "metrics";
}
}
| MetricsDataStreamIT |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/io/RecordProcessorUtils.java | {
"start": 1625,
"end": 8747
} | class ____ {
private static final String METHOD_SET_KEY_CONTEXT_ELEMENT = "setKeyContextElement";
private static final String METHOD_SET_KEY_CONTEXT_ELEMENT1 = "setKeyContextElement1";
private static final String METHOD_SET_KEY_CONTEXT_ELEMENT2 = "setKeyContextElement2";
/**
* Get record processor for {@link Input}, which will omit call of {@link
* Input#setKeyContextElement} if it doesn't have key context.
*
* @param input the {@link Input}
* @return the record processor
*/
public static <T> ThrowingConsumer<StreamRecord<T>, Exception> getRecordProcessor(
Input<T> input) {
boolean canOmitSetKeyContext;
if (input instanceof AbstractStreamOperator) {
canOmitSetKeyContext = canOmitSetKeyContext((AbstractStreamOperator<?>) input, 0);
} else {
canOmitSetKeyContext =
input instanceof KeyContextHandler
&& !((KeyContextHandler) input).hasKeyContext();
}
if (canOmitSetKeyContext) {
return input::processElement;
} else if (input instanceof AsyncKeyOrderedProcessing
&& ((AsyncKeyOrderedProcessing) input).isAsyncKeyOrderedProcessingEnabled()) {
return ((AsyncKeyOrderedProcessing) input).getRecordProcessor(1);
} else {
return record -> {
input.setKeyContextElement(record);
input.processElement(record);
};
}
}
/**
* Get record processor for the first input of {@link TwoInputStreamOperator}, which will omit
* call of {@link StreamOperator#setKeyContextElement1} if it doesn't have key context.
*
* @param operator the {@link TwoInputStreamOperator}
* @return the record processor
*/
public static <T> ThrowingConsumer<StreamRecord<T>, Exception> getRecordProcessor1(
TwoInputStreamOperator<T, ?, ?> operator) {
boolean canOmitSetKeyContext;
if (operator instanceof AbstractStreamOperator) {
canOmitSetKeyContext = canOmitSetKeyContext((AbstractStreamOperator<?>) operator, 0);
} else {
canOmitSetKeyContext =
operator instanceof KeyContextHandler
&& !((KeyContextHandler) operator).hasKeyContext1();
}
if (canOmitSetKeyContext) {
return operator::processElement1;
} else if (operator instanceof AsyncKeyOrderedProcessing
&& ((AsyncKeyOrderedProcessing) operator).isAsyncKeyOrderedProcessingEnabled()) {
return ((AsyncKeyOrderedProcessing) operator).getRecordProcessor(1);
} else {
return record -> {
operator.setKeyContextElement1(record);
operator.processElement1(record);
};
}
}
/**
* Get record processor for the second input of {@link TwoInputStreamOperator}, which will omit
* call of {@link StreamOperator#setKeyContextElement2} if it doesn't have key context.
*
* @param operator the {@link TwoInputStreamOperator}
* @return the record processor
*/
public static <T> ThrowingConsumer<StreamRecord<T>, Exception> getRecordProcessor2(
TwoInputStreamOperator<?, T, ?> operator) {
boolean canOmitSetKeyContext;
if (operator instanceof AbstractStreamOperator) {
canOmitSetKeyContext = canOmitSetKeyContext((AbstractStreamOperator<?>) operator, 1);
} else {
canOmitSetKeyContext =
operator instanceof KeyContextHandler
&& !((KeyContextHandler) operator).hasKeyContext2();
}
if (canOmitSetKeyContext) {
return operator::processElement2;
} else if (operator instanceof AsyncKeyOrderedProcessing
&& ((AsyncKeyOrderedProcessing) operator).isAsyncKeyOrderedProcessingEnabled()) {
return ((AsyncKeyOrderedProcessing) operator).getRecordProcessor(2);
} else {
return record -> {
operator.setKeyContextElement2(record);
operator.processElement2(record);
};
}
}
private static boolean canOmitSetKeyContext(
AbstractStreamOperator<?> streamOperator, int input) {
// Since AbstractStreamOperator is @PublicEvolving, we need to check whether the
// "SetKeyContextElement" is overridden by the (user-implemented) subclass. If it is
// overridden, we cannot omit it due to the subclass may maintain different key selectors on
// its own.
return !hasKeyContext(streamOperator, input)
&& !methodSetKeyContextIsOverridden(streamOperator, input);
}
private static boolean hasKeyContext(AbstractStreamOperator<?> operator, int input) {
if (input == 0) {
return operator.hasKeyContext1();
} else {
return operator.hasKeyContext2();
}
}
private static boolean methodSetKeyContextIsOverridden(
AbstractStreamOperator<?> operator, int input) {
if (input == 0) {
if (operator instanceof OneInputStreamOperator) {
return methodIsOverridden(
operator,
OneInputStreamOperator.class,
METHOD_SET_KEY_CONTEXT_ELEMENT,
StreamRecord.class)
|| methodIsOverridden(
operator,
AbstractStreamOperator.class,
METHOD_SET_KEY_CONTEXT_ELEMENT1,
StreamRecord.class);
} else {
return methodIsOverridden(
operator,
AbstractStreamOperator.class,
METHOD_SET_KEY_CONTEXT_ELEMENT1,
StreamRecord.class);
}
} else {
return methodIsOverridden(
operator,
AbstractStreamOperator.class,
METHOD_SET_KEY_CONTEXT_ELEMENT2,
StreamRecord.class);
}
}
private static boolean methodIsOverridden(
AbstractStreamOperator<?> operator,
Class<?> expectedDeclaringClass,
String methodName,
Class<?>... parameterTypes) {
try {
Class<?> methodDeclaringClass =
operator.getClass().getMethod(methodName, parameterTypes).getDeclaringClass();
return methodDeclaringClass != expectedDeclaringClass;
} catch (NoSuchMethodException exception) {
throw new FlinkRuntimeException(
String.format(
"BUG: Can't find '%s' method in '%s'",
methodName, operator.getClass()));
}
}
/** Private constructor to prevent instantiation. */
private RecordProcessorUtils() {}
}
| RecordProcessorUtils |
java | apache__flink | flink-libraries/flink-state-processing-api/src/main/java/org/apache/flink/state/table/module/StateModuleFactory.java | {
"start": 1161,
"end": 1647
} | class ____ implements ModuleFactory {
@Override
public String factoryIdentifier() {
return StateModule.IDENTIFIER;
}
@Override
public Set<ConfigOption<?>> requiredOptions() {
return Collections.emptySet();
}
@Override
public Set<ConfigOption<?>> optionalOptions() {
return Collections.emptySet();
}
@Override
public Module createModule(Context context) {
return StateModule.INSTANCE;
}
}
| StateModuleFactory |
java | apache__dubbo | dubbo-serialization/dubbo-serialization-hessian2/src/main/java/org/apache/dubbo/common/serialize/hessian2/Hessian2ObjectOutput.java | {
"start": 1185,
"end": 3528
} | class ____ implements ObjectOutput, Cleanable {
private final Hessian2Output mH2o;
@Deprecated
public Hessian2ObjectOutput(OutputStream os) {
mH2o = new Hessian2Output(os);
Hessian2FactoryManager hessian2FactoryManager =
FrameworkModel.defaultModel().getBeanFactory().getOrRegisterBean(Hessian2FactoryManager.class);
mH2o.setSerializerFactory(hessian2FactoryManager.getSerializerFactory(
Thread.currentThread().getContextClassLoader()));
}
public Hessian2ObjectOutput(OutputStream os, Hessian2FactoryManager hessian2FactoryManager) {
mH2o = new Hessian2Output(os);
mH2o.setSerializerFactory(hessian2FactoryManager.getSerializerFactory(
Thread.currentThread().getContextClassLoader()));
}
@Override
public void writeBool(boolean v) throws IOException {
mH2o.writeBoolean(v);
}
@Override
public void writeByte(byte v) throws IOException {
mH2o.writeInt(v);
}
@Override
public void writeShort(short v) throws IOException {
mH2o.writeInt(v);
}
@Override
public void writeInt(int v) throws IOException {
mH2o.writeInt(v);
}
@Override
public void writeLong(long v) throws IOException {
mH2o.writeLong(v);
}
@Override
public void writeFloat(float v) throws IOException {
mH2o.writeDouble(v);
}
@Override
public void writeDouble(double v) throws IOException {
mH2o.writeDouble(v);
}
@Override
public void writeBytes(byte[] b) throws IOException {
mH2o.writeBytes(b);
}
@Override
public void writeBytes(byte[] b, int off, int len) throws IOException {
mH2o.writeBytes(b, off, len);
}
@Override
public void writeUTF(String v) throws IOException {
mH2o.writeString(v);
}
@Override
public void writeObject(Object obj) throws IOException {
mH2o.writeObject(obj);
}
@Override
public void flushBuffer() throws IOException {
mH2o.flushBuffer();
}
public OutputStream getOutputStream() throws IOException {
return mH2o.getBytesOutputStream();
}
@Override
public void cleanup() {
if (mH2o != null) {
mH2o.reset();
}
}
}
| Hessian2ObjectOutput |
java | elastic__elasticsearch | modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java | {
"start": 1278,
"end": 8999
} | class ____ extends AbstractXContentTestCase<MultiSearchTemplateResponse> {
@Override
protected MultiSearchTemplateResponse createTestInstance() {
int numItems = randomIntBetween(0, 128);
long overallTookInMillis = randomNonNegativeLong();
MultiSearchTemplateResponse.Item[] items = new MultiSearchTemplateResponse.Item[numItems];
for (int i = 0; i < numItems; i++) {
// Creating a minimal response is OK, because SearchResponse self
// is tested elsewhere.
long tookInMillis = randomNonNegativeLong();
int totalShards = randomIntBetween(1, Integer.MAX_VALUE);
int successfulShards = randomIntBetween(0, totalShards);
int skippedShards = totalShards - successfulShards;
SearchResponse.Clusters clusters = randomClusters();
SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse();
SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits(
null,
totalShards,
successfulShards,
skippedShards,
tookInMillis,
ShardSearchFailure.EMPTY_ARRAY,
clusters
);
searchTemplateResponse.setResponse(searchResponse);
items[i] = new MultiSearchTemplateResponse.Item(searchTemplateResponse, null);
}
return new MultiSearchTemplateResponse(items, overallTookInMillis);
}
private static SearchResponse.Clusters randomClusters() {
int totalClusters = randomIntBetween(0, 10);
int successfulClusters = randomIntBetween(0, totalClusters);
int skippedClusters = totalClusters - successfulClusters;
return new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters);
}
private static MultiSearchTemplateResponse createTestInstanceWithFailures() {
int numItems = randomIntBetween(0, 128);
long overallTookInMillis = randomNonNegativeLong();
MultiSearchTemplateResponse.Item[] items = new MultiSearchTemplateResponse.Item[numItems];
for (int i = 0; i < numItems; i++) {
if (randomBoolean()) {
// Creating a minimal response is OK, because SearchResponse is tested elsewhere.
long tookInMillis = randomNonNegativeLong();
int totalShards = randomIntBetween(1, Integer.MAX_VALUE);
int successfulShards = randomIntBetween(0, totalShards);
int skippedShards = totalShards - successfulShards;
SearchResponse.Clusters clusters = randomClusters();
SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse();
SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits(
null,
totalShards,
successfulShards,
skippedShards,
tookInMillis,
ShardSearchFailure.EMPTY_ARRAY,
clusters
);
searchTemplateResponse.setResponse(searchResponse);
items[i] = new MultiSearchTemplateResponse.Item(searchTemplateResponse, null);
} else {
items[i] = new MultiSearchTemplateResponse.Item(null, new ElasticsearchException("an error"));
}
}
return new MultiSearchTemplateResponse(items, overallTookInMillis);
}
@Override
protected MultiSearchTemplateResponse doParseInstance(XContentParser parser) {
// The MultiSearchTemplateResponse is identical to the multi search response so we reuse the parsing logic in multi search response
MultiSearchResponse mSearchResponse = SearchResponseUtils.parseMultiSearchResponse(parser);
try {
org.elasticsearch.action.search.MultiSearchResponse.Item[] responses = mSearchResponse.getResponses();
MultiSearchTemplateResponse.Item[] templateResponses = new MultiSearchTemplateResponse.Item[responses.length];
int i = 0;
for (org.elasticsearch.action.search.MultiSearchResponse.Item item : responses) {
SearchTemplateResponse stResponse = null;
if (item.getResponse() != null) {
stResponse = new SearchTemplateResponse();
stResponse.setResponse(item.getResponse());
item.getResponse().incRef();
}
templateResponses[i++] = new MultiSearchTemplateResponse.Item(stResponse, item.getFailure());
}
return new MultiSearchTemplateResponse(templateResponses, mSearchResponse.getTook().millis());
} finally {
mSearchResponse.decRef();
}
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
protected Predicate<String> getRandomFieldsExcludeFilterWhenResultHasErrors() {
return field -> field.startsWith("responses");
}
@Override
protected void assertEqualInstances(MultiSearchTemplateResponse expectedInstance, MultiSearchTemplateResponse newInstance) {
assertThat(newInstance.getTook(), equalTo(expectedInstance.getTook()));
assertThat(newInstance.getResponses().length, equalTo(expectedInstance.getResponses().length));
for (int i = 0; i < expectedInstance.getResponses().length; i++) {
MultiSearchTemplateResponse.Item expectedItem = expectedInstance.getResponses()[i];
MultiSearchTemplateResponse.Item actualItem = newInstance.getResponses()[i];
if (expectedItem.isFailure()) {
assertThat(actualItem.getResponse(), nullValue());
assertThat(actualItem.getFailureMessage(), containsString(expectedItem.getFailureMessage()));
} else {
assertThat(actualItem.getResponse().toString(), equalTo(expectedItem.getResponse().toString()));
assertThat(actualItem.getFailure(), nullValue());
}
}
}
/**
* Test parsing {@link MultiSearchTemplateResponse} with inner failures as they don't support asserting on xcontent equivalence, given
* exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()}
* without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end.
*/
public void testFromXContentWithFailures() throws IOException {
Supplier<MultiSearchTemplateResponse> instanceSupplier = MultiSearchTemplateResponseTests::createTestInstanceWithFailures;
// with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata,
// but that does not bother our assertions, as we only want to test that we don't break.
boolean supportsUnknownFields = true;
// exceptions are not of the same type whenever parsed back
boolean assertToXContentEquivalence = false;
AbstractXContentTestCase.testFromXContent(
NUMBER_OF_TEST_RUNS,
instanceSupplier,
supportsUnknownFields,
Strings.EMPTY_ARRAY,
getRandomFieldsExcludeFilterWhenResultHasErrors(),
this::createParser,
this::doParseInstance,
this::assertEqualInstances,
assertToXContentEquivalence,
ToXContent.EMPTY_PARAMS,
RefCounted::decRef
);
}
@Override
protected void dispose(MultiSearchTemplateResponse instance) {
instance.decRef();
}
}
| MultiSearchTemplateResponseTests |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java | {
"start": 1810,
"end": 16608
} | class ____ extends QuotaUsage.Builder {
public Builder() {
}
public Builder length(long length) {
this.length = length;
return this;
}
public Builder fileCount(long fileCount) {
this.fileCount = fileCount;
return this;
}
public Builder directoryCount(long directoryCount) {
this.directoryCount = directoryCount;
return this;
}
public Builder snapshotLength(long snapshotLength) {
this.snapshotLength = snapshotLength;
return this;
}
public Builder snapshotFileCount(long snapshotFileCount) {
this.snapshotFileCount = snapshotFileCount;
return this;
}
public Builder snapshotDirectoryCount(long snapshotDirectoryCount) {
this.snapshotDirectoryCount = snapshotDirectoryCount;
return this;
}
public Builder snapshotSpaceConsumed(long snapshotSpaceConsumed) {
this.snapshotSpaceConsumed = snapshotSpaceConsumed;
return this;
}
public Builder erasureCodingPolicy(String ecPolicy) {
this.erasureCodingPolicy = ecPolicy;
return this;
}
@Override
public Builder quota(long quota){
super.quota(quota);
return this;
}
@Override
public Builder spaceConsumed(long spaceConsumed) {
super.spaceConsumed(spaceConsumed);
return this;
}
@Override
public Builder spaceQuota(long spaceQuota) {
super.spaceQuota(spaceQuota);
return this;
}
@Override
public Builder typeConsumed(long typeConsumed[]) {
super.typeConsumed(typeConsumed);
return this;
}
@Override
public Builder typeQuota(StorageType type, long quota) {
super.typeQuota(type, quota);
return this;
}
@Override
public Builder typeConsumed(StorageType type, long consumed) {
super.typeConsumed(type, consumed);
return this;
}
@Override
public Builder typeQuota(long typeQuota[]) {
super.typeQuota(typeQuota);
return this;
}
public ContentSummary build() {
// Set it in case applications call QuotaUsage#getFileAndDirectoryCount.
super.fileAndDirectoryCount(this.fileCount + this.directoryCount);
return new ContentSummary(this);
}
private long length;
private long fileCount;
private long directoryCount;
private long snapshotLength;
private long snapshotFileCount;
private long snapshotDirectoryCount;
private long snapshotSpaceConsumed;
private String erasureCodingPolicy;
}
/** Constructor deprecated by ContentSummary.Builder*/
@Deprecated
public ContentSummary() {}
/**
* Constructor, deprecated by ContentSummary.Builder
* This constructor implicitly set spaceConsumed the same as length.
* spaceConsumed and length must be set explicitly with
* ContentSummary.Builder.
*
* @param length length.
* @param fileCount file count.
* @param directoryCount directory count.
* */
@Deprecated
public ContentSummary(long length, long fileCount, long directoryCount) {
this(length, fileCount, directoryCount, -1L, length, -1L);
}
/**
* Constructor, deprecated by ContentSummary.Builder.
*
* @param length length.
* @param fileCount file count.
* @param directoryCount directory count.
* @param quota quota.
* @param spaceConsumed space consumed.
* @param spaceQuota space quota.
* */
@Deprecated
public ContentSummary(
long length, long fileCount, long directoryCount, long quota,
long spaceConsumed, long spaceQuota) {
this.length = length;
this.fileCount = fileCount;
this.directoryCount = directoryCount;
setQuota(quota);
setSpaceConsumed(spaceConsumed);
setSpaceQuota(spaceQuota);
}
/**
* Constructor for ContentSummary.Builder.
*
* @param builder builder.
*/
private ContentSummary(Builder builder) {
super(builder);
this.length = builder.length;
this.fileCount = builder.fileCount;
this.directoryCount = builder.directoryCount;
this.snapshotLength = builder.snapshotLength;
this.snapshotFileCount = builder.snapshotFileCount;
this.snapshotDirectoryCount = builder.snapshotDirectoryCount;
this.snapshotSpaceConsumed = builder.snapshotSpaceConsumed;
this.erasureCodingPolicy = builder.erasureCodingPolicy;
}
/** @return the length */
public long getLength() {return length;}
public long getSnapshotLength() {
return snapshotLength;
}
/** @return the directory count */
public long getDirectoryCount() {return directoryCount;}
public long getSnapshotDirectoryCount() {
return snapshotDirectoryCount;
}
/** @return the file count */
public long getFileCount() {return fileCount;}
public long getSnapshotFileCount() {
return snapshotFileCount;
}
public long getSnapshotSpaceConsumed() {
return snapshotSpaceConsumed;
}
public String getErasureCodingPolicy() {
return erasureCodingPolicy;
}
@Override
@InterfaceAudience.Private
public void write(DataOutput out) throws IOException {
out.writeLong(length);
out.writeLong(fileCount);
out.writeLong(directoryCount);
out.writeLong(getQuota());
out.writeLong(getSpaceConsumed());
out.writeLong(getSpaceQuota());
}
@Override
@InterfaceAudience.Private
public void readFields(DataInput in) throws IOException {
this.length = in.readLong();
this.fileCount = in.readLong();
this.directoryCount = in.readLong();
setQuota(in.readLong());
setSpaceConsumed(in.readLong());
setSpaceQuota(in.readLong());
}
@Override
public boolean equals(Object to) {
if (this == to) {
return true;
} else if (to instanceof ContentSummary) {
ContentSummary right = (ContentSummary) to;
return getLength() == right.getLength() &&
getFileCount() == right.getFileCount() &&
getDirectoryCount() == right.getDirectoryCount() &&
getSnapshotLength() == right.getSnapshotLength() &&
getSnapshotFileCount() == right.getSnapshotFileCount() &&
getSnapshotDirectoryCount() == right.getSnapshotDirectoryCount() &&
getSnapshotSpaceConsumed() == right.getSnapshotSpaceConsumed() &&
getErasureCodingPolicy().equals(right.getErasureCodingPolicy()) &&
super.equals(to);
} else {
return super.equals(to);
}
}
@Override
public int hashCode() {
long result = getLength() ^ getFileCount() ^ getDirectoryCount()
^ getSnapshotLength() ^ getSnapshotFileCount()
^ getSnapshotDirectoryCount() ^ getSnapshotSpaceConsumed()
^ getErasureCodingPolicy().hashCode();
return ((int) result) ^ super.hashCode();
}
/**
* Output format:
* <----12----> <----12----> <-------18------->
* DIR_COUNT FILE_COUNT CONTENT_SIZE
*/
private static final String SUMMARY_FORMAT = "%12s %12s %18s ";
private static final String[] SUMMARY_HEADER_FIELDS =
new String[] {"DIR_COUNT", "FILE_COUNT", "CONTENT_SIZE"};
/** The header string */
private static final String SUMMARY_HEADER = String.format(
SUMMARY_FORMAT, (Object[]) SUMMARY_HEADER_FIELDS);
private static final String ALL_HEADER = QUOTA_HEADER + SUMMARY_HEADER;
/**
* Output format:
* <--------20-------->
* ERASURECODING_POLICY
*/
private static final String ERASURECODING_POLICY_FORMAT = "%20s ";
private static final String ERASURECODING_POLICY_HEADER_FIELD =
"ERASURECODING_POLICY";
/** The header string. */
private static final String ERASURECODING_POLICY_HEADER = String.format(
ERASURECODING_POLICY_FORMAT, ERASURECODING_POLICY_HEADER_FIELD);
/**
* Output format:<-------18-------> <----------24---------->
* <----------24---------->. <-------------28------------> SNAPSHOT_LENGTH
* SNAPSHOT_FILE_COUNT SNAPSHOT_DIR_COUNT SNAPSHOT_SPACE_CONSUMED
*/
private static final String SNAPSHOT_FORMAT = "%18s %24s %24s %28s ";
private static final String[] SNAPSHOT_HEADER_FIELDS =
new String[] {"SNAPSHOT_LENGTH", "SNAPSHOT_FILE_COUNT",
"SNAPSHOT_DIR_COUNT", "SNAPSHOT_SPACE_CONSUMED"};
/** The header string. */
private static final String SNAPSHOT_HEADER =
String.format(SNAPSHOT_FORMAT, (Object[]) SNAPSHOT_HEADER_FIELDS);
/** Return the header of the output.
* if qOption is false, output directory count, file count, and content size;
* if qOption is true, output quota and remaining quota as well.
*
* @param qOption a flag indicating if quota needs to be printed or not
* @return the header of the output
*/
public static String getHeader(boolean qOption) {
return qOption ? ALL_HEADER : SUMMARY_HEADER;
}
public static String getErasureCodingPolicyHeader() {
return ERASURECODING_POLICY_HEADER;
}
public static String getSnapshotHeader() {
return SNAPSHOT_HEADER;
}
/**
* Returns the names of the fields from the summary header.
*
* @return names of fields as displayed in the header
*/
public static String[] getHeaderFields() {
return SUMMARY_HEADER_FIELDS;
}
/**
* Returns the names of the fields used in the quota summary.
*
* @return names of quota fields as displayed in the header
*/
public static String[] getQuotaHeaderFields() {
return QUOTA_HEADER_FIELDS;
}
@Override
public String toString() {
return toString(true);
}
/** Return the string representation of the object in the output format.
* if qOption is false, output directory count, file count, and content size;
* if qOption is true, output quota and remaining quota as well.
*
* @param qOption a flag indicating if quota needs to be printed or not
* @return the string representation of the object
*/
@Override
public String toString(boolean qOption) {
return toString(qOption, false);
}
/** Return the string representation of the object in the output format.
* For description of the options,
* @see #toString(boolean, boolean, boolean, boolean, List)
*
* @param qOption a flag indicating if quota needs to be printed or not
* @param hOption a flag indicating if human readable output if to be used
* @return the string representation of the object
*/
public String toString(boolean qOption, boolean hOption) {
return toString(qOption, hOption, false, null);
}
/** Return the string representation of the object in the output format.
* For description of the options,
* @see #toString(boolean, boolean, boolean, boolean, List)
*
* @param qOption a flag indicating if quota needs to be printed or not
* @param hOption a flag indicating if human readable output is to be used
* @param xOption a flag indicating if calculation from snapshots is to be
* included in the output
* @return the string representation of the object
*/
public String toString(boolean qOption, boolean hOption, boolean xOption) {
return toString(qOption, hOption, false, xOption, null);
}
/**
* Return the string representation of the object in the output format.
* For description of the options,
* @see #toString(boolean, boolean, boolean, boolean, List)
*
* @param qOption a flag indicating if quota needs to be printed or not
* @param hOption a flag indicating if human readable output if to be used
* @param tOption a flag indicating if display quota by storage types
* @param types Storage types to display
* @return the string representation of the object
*/
public String toString(boolean qOption, boolean hOption,
boolean tOption, List<StorageType> types) {
return toString(qOption, hOption, tOption, false, types);
}
/** Return the string representation of the object in the output format.
* if qOption is false, output directory count, file count, and content size;
* if qOption is true, output quota and remaining quota as well.
* if hOption is false, file sizes are returned in bytes
* if hOption is true, file sizes are returned in human readable
* if tOption is true, display the quota by storage types
* if tOption is false, same logic with #toString(boolean,boolean)
* if xOption is false, output includes the calculation from snapshots
* if xOption is true, output excludes the calculation from snapshots
*
* @param qOption a flag indicating if quota needs to be printed or not
* @param hOption a flag indicating if human readable output is to be used
* @param tOption a flag indicating if display quota by storage types
* @param xOption a flag indicating if calculation from snapshots is to be
* included in the output
* @param types Storage types to display
* @return the string representation of the object
*/
public String toString(boolean qOption, boolean hOption, boolean tOption,
boolean xOption, List<StorageType> types) {
String prefix = "";
if (tOption) {
return getTypesQuotaUsage(hOption, types);
}
if (qOption) {
prefix = getQuotaUsage(hOption);
}
if (xOption) {
return prefix + String.format(SUMMARY_FORMAT,
formatSize(directoryCount - snapshotDirectoryCount, hOption),
formatSize(fileCount - snapshotFileCount, hOption),
formatSize(length - snapshotLength, hOption));
} else {
return prefix + String.format(SUMMARY_FORMAT,
formatSize(directoryCount, hOption),
formatSize(fileCount, hOption),
formatSize(length, hOption));
}
}
/**
* Formats a size to be human readable or in bytes.
* @param size value to be formatted
* @param humanReadable flag indicating human readable or not
* @return String representation of the size
*/
private String formatSize(long size, boolean humanReadable) {
return humanReadable
? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
: String.valueOf(size);
}
/**
* @return Constant-width String representation of Erasure Coding Policy
*/
public String toErasureCodingPolicy() {
return String.format(ERASURECODING_POLICY_FORMAT,
erasureCodingPolicy.equals("Replicated")
? erasureCodingPolicy : "EC:" + erasureCodingPolicy);
}
/**
* Return the string representation of the snapshot counts in the output
* format.
* @param hOption flag indicating human readable or not
* @return String representation of the snapshot counts
*/
public String toSnapshot(boolean hOption) {
return String.format(SNAPSHOT_FORMAT, formatSize(snapshotLength, hOption),
formatSize(snapshotFileCount, hOption),
formatSize(snapshotDirectoryCount, hOption),
formatSize(snapshotSpaceConsumed, hOption));
}
}
| Builder |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/dynamicrouter/DynamicRouterConcurrentPOJOManualTest.java | {
"start": 4451,
"end": 4866
} | class ____ {
private final String target;
public MyDynamicRouterPojo(String target) {
this.target = target;
}
@DynamicRouter
public String route(@Header(Exchange.SLIP_ENDPOINT) String previous) {
if (previous == null) {
return target;
} else {
return null;
}
}
}
}
| MyDynamicRouterPojo |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/ops/MergeMultipleEntityCopiesAllowedOrphanDeleteTest.java | {
"start": 1268,
"end": 15024
} | class ____ {
@Test
@FailureExpected(jiraKey = "HHH-9240")
public void testTopLevelUnidirOneToManyBackrefWithNewElement(SessionFactoryScope scope) {
Item item1 = new Item();
item1.setName( "item1 name" );
SubItem subItem1 = new SubItem();
subItem1.setName( "subItem1 name" );
item1.getSubItemsBackref().add( subItem1 );
scope.inTransaction(
session ->
session.persist( item1 )
);
// get another representation of item1
Item item1_1 = scope.fromTransaction(
session ->
session.get( Item.class, item1.getId() )
);
assertFalse( Hibernate.isInitialized( item1_1.getSubItemsBackref() ) );
Category category = new Category();
category.setName( "category" );
SubItem subItem2 = new SubItem();
subItem2.setName( "subItem2 name" );
item1.getSubItemsBackref().add( subItem2 );
item1.setCategory( category );
category.setExampleItem( item1_1 );
scope.inTransaction(
session -> {
// The following will fail due to PropertyValueException because item1 will
// be removed from the inverted merge map when the operation cascades to item1_1.
Item item1Merged = (Item) session.merge( item1 );
// top-level collection should win
assertThat( item1Merged.getSubItemsBackref().size(), is( 2 ) );
}
);
scope.inTransaction(
session -> {
Item item = session.get( Item.class, item1.getId() );
assertThat( item.getSubItemsBackref().size(), is( 2 ) );
}
);
cleanup( scope );
}
@Test
@FailureExpected(jiraKey = "HHH-9239")
public void testNestedUnidirOneToManyBackrefWithNewElement(SessionFactoryScope scope) {
Item item1 = new Item();
item1.setName( "item1 name" );
SubItem subItem1 = new SubItem();
subItem1.setName( "subItem1 name" );
item1.getSubItemsBackref().add( subItem1 );
scope.inTransaction(
session ->
session.persist( item1 )
);
// get another representation of item1
Item item1_1 = scope.fromTransaction(
session -> {
Item item = session.get( Item.class, item1.getId() );
Hibernate.initialize( item.getSubItemsBackref() );
return item;
}
);
Category category = new Category();
category.setName( "category" );
item1.setCategory( category );
// Add a new SubItem to the Item representation that will be in a nested association.
SubItem subItem2 = new SubItem();
subItem2.setName( "subItem2 name" );
item1_1.getSubItemsBackref().add( subItem2 );
category.setExampleItem( item1_1 );
scope.inTransaction(
session -> {
Item item1Merged = (Item) session.merge( item1 );
// The resulting collection should contain the added element
assertThat( item1Merged.getSubItemsBackref().size(), is( 2 ) );
assertThat( item1Merged.getSubItemsBackref().get( 0 ).getName(), is( "subItem1 name" ) );
assertThat( item1Merged.getSubItemsBackref().get( 1 ).getName(), is( "subItem2 name" ) );
}
);
scope.inTransaction(
session -> {
Item item = session.get( Item.class, item1.getId() );
assertThat( item.getSubItemsBackref().size(), is( 2 ) );
assertThat( item.getSubItemsBackref().get( 0 ).getName(), is( "subItem1 name" ) );
assertThat( item.getSubItemsBackref().get( 1 ).getName(), is( "subItem2 name" ) );
}
);
cleanup( scope );
}
@Test
//@FailureExpected( jiraKey = "HHH-9106" )
public void testTopLevelUnidirOneToManyBackrefWithRemovedElement(SessionFactoryScope scope) {
Item item1 = new Item();
item1.setName( "item1 name" );
SubItem subItem1 = new SubItem();
subItem1.setName( "subItem1 name" );
item1.getSubItemsBackref().add( subItem1 );
SubItem subItem2 = new SubItem();
subItem2.setName( "subItem2 name" );
item1.getSubItemsBackref().add( subItem2 );
scope.inTransaction(
session ->
session.persist( item1 )
);
// get another representation of item1
Item item1_1 = scope.fromTransaction(
session ->
session.get( Item.class, item1.getId() )
);
assertFalse( Hibernate.isInitialized( item1_1.getSubItemsBackref() ) );
Category category = new Category();
category.setName( "category" );
item1.setCategory( category );
category.setExampleItem( item1_1 );
// remove subItem1 from top-level Item
item1.getSubItemsBackref().remove( subItem1 );
scope.inTransaction(
session -> {
Item item1Merged = (Item) session.merge( item1 );
// element should be removed
assertThat( item1Merged.getSubItemsBackref().size(), is( 1 ) );
}
);
scope.inTransaction(
session -> {
Item item = session.get( Item.class, item1.getId() );
assertThat( item.getSubItemsBackref().size(), is( 1 ) );
// because cascade includes "delete-orphan" the removed SubItem should have been deleted.
SubItem subItem = session.get( SubItem.class, subItem1.getId() );
assertNull( subItem );
}
);
cleanup( scope );
}
@Test
@FailureExpected(jiraKey = "HHH-9239")
public void testNestedUnidirOneToManyBackrefWithRemovedElement(SessionFactoryScope scope) {
Item item1 = new Item();
item1.setName( "item1 name" );
SubItem subItem1 = new SubItem();
subItem1.setName( "subItem1 name" );
item1.getSubItemsBackref().add( subItem1 );
SubItem subItem2 = new SubItem();
subItem2.setName( "subItem2 name" );
item1.getSubItemsBackref().add( subItem2 );
scope.inTransaction(
session ->
session.persist( item1 )
);
// get another representation of item1
Item item1_1 = scope.fromTransaction(
session -> {
Item item = session.get( Item.class, item1.getId() );
Hibernate.initialize( item.getSubItemsBackref() );
return item;
}
);
// remove subItem1 from the nested Item
item1_1.getSubItemsBackref().remove( subItem1 );
Category category = new Category();
category.setName( "category" );
item1.setCategory( category );
category.setExampleItem( item1_1 );
scope.inTransaction(
session -> {
Item item1Merged = (Item) session.merge( item1 );
// the element should have been removed
assertThat( item1Merged.getSubItemsBackref().size(), is( 1 ) );
assertTrue( item1Merged.getSubItemsBackref().contains( subItem2 ) );
}
);
scope.inTransaction(
session -> {
Item item = session.get( Item.class, item1.getId() );
assertThat( item.getSubItemsBackref().size(), is( 1 ) );
assertTrue( item.getSubItemsBackref().contains( subItem2 ) );
// because cascade includes "delete-orphan" the removed SubItem should have been deleted.
SubItem subItem = session.get( SubItem.class, subItem1.getId() );
assertNull( subItem );
}
);
cleanup( scope );
}
@Test
//@FailureExpected( jiraKey = "HHH-9106" )
public void testTopLevelUnidirOneToManyNoBackrefWithNewElement(SessionFactoryScope scope) {
Category category1 = new Category();
category1.setName( "category1 name" );
SubCategory subCategory1 = new SubCategory();
subCategory1.setName( "subCategory1 name" );
category1.getSubCategories().add( subCategory1 );
scope.inTransaction(
session ->
session.persist( category1 )
);
// get another representation of category1
Category category1_1 = scope.fromTransaction(
session ->
session.get( Category.class, category1.getId() )
);
assertFalse( Hibernate.isInitialized( category1_1.getSubCategories() ) );
SubCategory subCategory2 = new SubCategory();
subCategory2.setName( "subCategory2 name" );
category1.getSubCategories().add( subCategory2 );
Item item = new Item();
item.setName( "item" );
category1.setExampleItem( item );
item.setCategory( category1_1 );
scope.inTransaction(
session -> {
Category category1Merged = (Category) session.merge( category1 );
assertThat( category1Merged.getSubCategories().size(), is( 2 ) );
}
);
scope.inTransaction(
session -> {
Category category = session.get( Category.class, category1.getId() );
assertThat( category.getSubCategories().size(), is( 2 ) );
}
);
cleanup( scope );
}
@Test
@FailureExpected(jiraKey = "HHH-9239")
public void testNestedUnidirOneToManyNoBackrefWithNewElement(SessionFactoryScope scope) {
Category category1 = new Category();
category1.setName( "category1 name" );
SubCategory subCategory1 = new SubCategory();
subCategory1.setName( "subCategory1 name" );
category1.getSubCategories().add( subCategory1 );
scope.inTransaction(
session ->
session.persist( category1 )
);
// get another representation of category1
Category category1_1 = scope.fromTransaction(
session -> {
Category category = session.get( Category.class, category1.getId() );
Hibernate.initialize( category.getSubCategories() );
return category;
}
);
SubCategory subCategory2 = new SubCategory();
subCategory2.setName( "subCategory2 name" );
category1_1.getSubCategories().add( subCategory2 );
Item item = new Item();
item.setName( "item" );
category1.setExampleItem( item );
item.setCategory( category1_1 );
scope.inTransaction(
session -> {
Category category1Merged = (Category) session.merge( category1 );
// new element should be there
assertThat( category1Merged.getSubCategories().size(), is( 2 ) );
}
);
scope.inTransaction(
session -> {
Category category = session.get( Category.class, category1.getId() );
assertThat( category.getSubCategories().size(), is( 2 ) );
}
);
cleanup( scope );
}
@Test
//@FailureExpected( jiraKey = "HHH-9106" )
public void testTopLevelUnidirOneToManyNoBackrefWithRemovedElement(SessionFactoryScope scope) {
Category category1 = new Category();
category1.setName( "category1 name" );
SubCategory subCategory1 = new SubCategory();
subCategory1.setName( "subCategory1 name" );
category1.getSubCategories().add( subCategory1 );
SubCategory subCategory2 = new SubCategory();
subCategory2.setName( "subCategory2 name" );
category1.getSubCategories().add( subCategory2 );
scope.inTransaction(
session ->
session.persist( category1 )
);
// get another representation of category1
Category category1_1 = scope.fromTransaction(
session ->
session.get( Category.class, category1.getId() )
);
assertFalse( Hibernate.isInitialized( category1_1.getSubCategories() ) );
Item item = new Item();
item.setName( "item" );
category1.setExampleItem( item );
item.setCategory( category1_1 );
category1.getSubCategories().remove( subCategory1 );
scope.inTransaction(
session -> {
Category category1Merged = (Category) session.merge( category1 );
assertThat( category1Merged.getSubCategories().size(), is( 1 ) );
assertTrue( category1Merged.getSubCategories().contains( subCategory2 ) );
}
);
scope.inTransaction(
session -> {
Category category = session.get( Category.class, category1.getId() );
assertThat( category.getSubCategories().size(), is( 1 ) );
assertTrue( category.getSubCategories().contains( subCategory2 ) );
SubCategory subCategory = session.get( SubCategory.class, subCategory1.getId() );
assertNull( subCategory );
}
);
cleanup( scope );
}
@Test
@FailureExpected(jiraKey = "HHH-9239")
public void testNestedUnidirOneToManyNoBackrefWithRemovedElement(SessionFactoryScope scope) {
Category category1 = new Category();
category1.setName( "category1 name" );
SubCategory subCategory1 = new SubCategory();
subCategory1.setName( "subCategory1 name" );
category1.getSubCategories().add( subCategory1 );
SubCategory subCategory2 = new SubCategory();
subCategory2.setName( "subCategory2 name" );
category1.getSubCategories().add( subCategory2 );
scope.inTransaction(
session ->
session.persist( category1 )
);
// get another representation of category1
Category category1_1 = scope.fromTransaction(
session -> {
Category category = session.get( Category.class, category1.getId() );
Hibernate.initialize( category.getSubCategories() );
return category;
}
);
category1_1.getSubCategories().remove( subCategory2 );
Item item = new Item();
item.setName( "item" );
category1.setExampleItem( item );
item.setCategory( category1_1 );
scope.inTransaction(
session -> {
Category category1Merged = (Category) session.merge( category1 );
assertThat( category1Merged.getSubCategories().size(), is( 1 ) );
assertTrue( category1Merged.getSubCategories().contains( subCategory2 ) );
}
);
scope.inTransaction(
session -> {
Category category = session.get( Category.class, category1.getId() );
assertThat( category.getSubCategories().size(), is( 1 ) );
assertTrue( category1.getSubCategories().contains( subCategory2 ) );
SubCategory subCategory = session.get( SubCategory.class, subCategory1.getId() );
assertNull( subCategory );
}
);
cleanup( scope );
}
@SuppressWarnings("unchecked")
private void cleanup(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
session.createQuery( "delete from SubItem" ).executeUpdate();
for ( Hoarder hoarder : (List<Hoarder>) session.createQuery( "from Hoarder" ).list() ) {
hoarder.getItems().clear();
session.remove( hoarder );
}
for ( Category category : (List<Category>) session.createQuery( "from Category" ).list() ) {
if ( category.getExampleItem() != null ) {
category.setExampleItem( null );
session.remove( category );
}
}
for ( Item item : (List<Item>) session.createQuery( "from Item" ).list() ) {
item.setCategory( null );
session.remove( item );
}
session.createQuery( "delete from Item" ).executeUpdate();
}
);
}
}
| MergeMultipleEntityCopiesAllowedOrphanDeleteTest |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/ssl/PemParser.java | {
"start": 7445,
"end": 8947
} | class ____ implements Decoder {
@Override
public Collection<?> decode(byte[] der) throws GeneralSecurityException {
EncryptedPrivateKeyInfo keyInfo;
try {
keyInfo = new EncryptedPrivateKeyInfo(der);
} catch (IOException e) {
throw new GeneralSecurityException("Invalid DER", e);
}
String cipherAlg = keyInfo.getAlgName();
if (cipherAlg.equals("PBES2")) {
// Java >= 19 does this automatically
cipherAlg = keyInfo.getAlgParameters().toString();
}
SecretKeyFactory skf = provider == null ? SecretKeyFactory.getInstance(cipherAlg) : SecretKeyFactory.getInstance(cipherAlg, provider);
if (password == null) {
throw new IllegalArgumentException("Encrypted private key found but no password given");
}
SecretKey sk = skf.generateSecret(new PBEKeySpec(password.toCharArray()));
Cipher cipher = Cipher.getInstance(cipherAlg);
cipher.init(Cipher.DECRYPT_MODE, sk, keyInfo.getAlgParameters());
PKCS8EncodedKeySpec keySpec = keyInfo.getKeySpec(cipher);
String keyAlg = keySpec.getAlgorithm();
KeyFactory factory = provider == null ? KeyFactory.getInstance(keyAlg) : KeyFactory.getInstance(keyAlg, provider);
return List.of(factory.generatePrivate(keySpec));
}
}
private final | Pkcs8EncryptedPrivateKey |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/MonoPublishOn.java | {
"start": 1138,
"end": 1713
} | class ____<T> extends InternalMonoOperator<T, T> {
final Scheduler scheduler;
MonoPublishOn(Mono<? extends T> source, Scheduler scheduler) {
super(source);
this.scheduler = scheduler;
}
@Override
public CoreSubscriber<? super T> subscribeOrReturn(CoreSubscriber<? super T> actual) {
return new PublishOnSubscriber<T>(actual, scheduler);
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_ON) return scheduler;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.ASYNC;
return super.scanUnsafe(key);
}
static final | MonoPublishOn |
java | apache__maven | impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/mvnsh/CommonsCliShellOptions.java | {
"start": 1168,
"end": 1917
} | class ____ extends CommonsCliOptions implements ShellOptions {
public static CommonsCliShellOptions parse(String[] args) throws ParseException {
CLIManager cliManager = new CLIManager();
return new CommonsCliShellOptions(Options.SOURCE_CLI, cliManager, cliManager.parse(args));
}
protected CommonsCliShellOptions(String source, CLIManager cliManager, CommandLine commandLine) {
super(source, cliManager, commandLine);
}
@Override
protected CommonsCliShellOptions copy(
String source, CommonsCliOptions.CLIManager cliManager, CommandLine commandLine) {
return new CommonsCliShellOptions(source, (CLIManager) cliManager, commandLine);
}
protected static | CommonsCliShellOptions |
java | apache__camel | components/camel-bindy/src/main/java/org/apache/camel/dataformat/bindy/util/ConverterUtils.java | {
"start": 1196,
"end": 4110
} | class ____ {
private static final byte[] WINDOWS_RETURN_BYTES = { 13, 10 };
private static final byte[] UNIX_RETURN_BYTES = { 10 };
private static final byte[] MAC_RETURN_BYTES = { 13 };
private static final String CRLF = "\r\n";
private static final String LINE_BREAK = "\n";
private static final String CARRIAGE_RETURN = "\r";
private ConverterUtils() {
// helper class
}
public static char getCharDelimiter(String separator) {
if (separator.equals("\\u0001")) {
return '\u0001';
} else if (separator.equals("\\t") || separator.equals("\\u0009")) {
return '\u0009';
} else if (separator.length() > 1) {
return separator.charAt(separator.length() - 1);
} else {
return separator.charAt(0);
}
}
public static byte[] getByteReturn(String returnCharacter) {
if (returnCharacter.equals("WINDOWS")) {
return WINDOWS_RETURN_BYTES;
} else if (returnCharacter.equals("UNIX")) {
return UNIX_RETURN_BYTES;
} else if (returnCharacter.equals("MAC")) {
return MAC_RETURN_BYTES;
} else {
return returnCharacter.getBytes();
}
}
public static String getStringCarriageReturn(String returnCharacter) {
if (returnCharacter.equals("WINDOWS")) {
return CRLF;
} else if (returnCharacter.equals("UNIX")) {
return LINE_BREAK;
} else if (returnCharacter.equals("MAC")) {
return CARRIAGE_RETURN;
} else {
return returnCharacter;
}
}
public static FormattingOptions convert(DataField dataField, Class<?> clazz, BindyConverter converter, String locale) {
return new FormattingOptions()
.forClazz(clazz)
.withPattern(dataField.pattern())
.withLocale(locale)
.withTimezone(dataField.timezone())
.withPrecision(dataField.precision())
.withRounding(dataField.rounding())
.withImpliedDecimalSeparator(dataField.impliedDecimalSeparator())
.withDecimalSeparator(dataField.decimalSeparator())
.withBindyConverter(converter)
.withGroupingSeparator(dataField.groupingSeparator());
}
public static FormattingOptions convert(
KeyValuePairField dataField, Class<?> clazz, BindyConverter converter, String locale) {
return new FormattingOptions()
.forClazz(clazz)
.withPattern(dataField.pattern())
.withLocale(locale)
.withTimezone(dataField.timezone())
.withPrecision(dataField.precision())
.withBindyConverter(converter)
.withImpliedDecimalSeparator(dataField.impliedDecimalSeparator());
}
}
| ConverterUtils |
java | jhy__jsoup | src/main/java/org/jsoup/select/Elements.java | {
"start": 1058,
"end": 4910
} | class ____ extends Nodes<Element> {
public Elements() {
}
public Elements(int initialCapacity) {
super(initialCapacity);
}
public Elements(Collection<Element> elements) {
super(elements);
}
public Elements(List<Element> elements) {
super(elements);
}
public Elements(Element... elements) {
super(Arrays.asList(elements));
}
/**
* Creates a deep copy of these elements.
* @return a deep copy
*/
@Override
public Elements clone() {
Elements clone = new Elements(size());
for (Element e : this)
clone.add(e.clone());
return clone;
}
/**
Convenience method to get the Elements as a plain ArrayList. This allows modification to the list of elements
without modifying the source Document. I.e. whereas calling {@code elements.remove(0)} will remove the element from
both the Elements and the DOM, {@code elements.asList().remove(0)} will remove the element from the list only.
<p>Each Element is still the same DOM connected Element.</p>
@return a new ArrayList containing the elements in this list
@since 1.19.2
@see #Elements(List)
*/
@Override
public ArrayList<Element> asList() {
return new ArrayList<>(this);
}
// attribute methods
/**
Get an attribute value from the first matched element that has the attribute.
@param attributeKey The attribute key.
@return The attribute value from the first matched element that has the attribute. If no elements were matched (isEmpty() == true),
or if the no elements have the attribute, returns empty string.
@see #hasAttr(String)
*/
public String attr(String attributeKey) {
for (Element element : this) {
if (element.hasAttr(attributeKey))
return element.attr(attributeKey);
}
return "";
}
/**
Checks if any of the matched elements have this attribute defined.
@param attributeKey attribute key
@return true if any of the elements have the attribute; false if none do.
*/
public boolean hasAttr(String attributeKey) {
for (Element element : this) {
if (element.hasAttr(attributeKey))
return true;
}
return false;
}
/**
* Get the attribute value for each of the matched elements. If an element does not have this attribute, no value is
* included in the result set for that element.
* @param attributeKey the attribute name to return values for. You can add the {@code abs:} prefix to the key to
* get absolute URLs from relative URLs, e.g.: {@code doc.select("a").eachAttr("abs:href")} .
* @return a list of each element's attribute value for the attribute
*/
public List<String> eachAttr(String attributeKey) {
List<String> attrs = new ArrayList<>(size());
for (Element element : this) {
if (element.hasAttr(attributeKey))
attrs.add(element.attr(attributeKey));
}
return attrs;
}
/**
* Set an attribute on all matched elements.
* @param attributeKey attribute key
* @param attributeValue attribute value
* @return this
*/
public Elements attr(String attributeKey, String attributeValue) {
for (Element element : this) {
element.attr(attributeKey, attributeValue);
}
return this;
}
/**
* Remove an attribute from every matched element.
* @param attributeKey The attribute to remove.
* @return this (for chaining)
*/
public Elements removeAttr(String attributeKey) {
for (Element element : this) {
element.removeAttr(attributeKey);
}
return this;
}
/**
Add the | Elements |
java | google__dagger | javatests/dagger/functional/producers/subcomponent/sub/ChildComponent.java | {
"start": 1013,
"end": 1067
} | interface ____ {
ChildComponent build();
}
}
| Builder |
java | apache__camel | components/camel-wordpress/src/main/java/org/apache/camel/component/wordpress/api/service/WordpressServiceUsers.java | {
"start": 1004,
"end": 1095
} | interface ____ extends WordpressCrudService<User, UserSearchCriteria> {
}
| WordpressServiceUsers |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/text/StrMatcherTest.java | {
"start": 1200,
"end": 8496
} | class ____ extends AbstractLangTest {
private static final char[] BUFFER1 = "0,1\t2 3\n\r\f\u0000'\"".toCharArray();
private static final char[] BUFFER2 = "abcdef".toCharArray();
@Test
void testCharMatcher_char() {
final StrMatcher matcher = StrMatcher.charMatcher('c');
assertEquals(0, matcher.isMatch(BUFFER2, 0));
assertEquals(0, matcher.isMatch(BUFFER2, 1));
assertEquals(1, matcher.isMatch(BUFFER2, 2));
assertEquals(0, matcher.isMatch(BUFFER2, 3));
assertEquals(0, matcher.isMatch(BUFFER2, 4));
assertEquals(0, matcher.isMatch(BUFFER2, 5));
}
@Test
void testCharSetMatcher_charArray() {
final StrMatcher matcher = StrMatcher.charSetMatcher("ace".toCharArray());
assertEquals(1, matcher.isMatch(BUFFER2, 0));
assertEquals(0, matcher.isMatch(BUFFER2, 1));
assertEquals(1, matcher.isMatch(BUFFER2, 2));
assertEquals(0, matcher.isMatch(BUFFER2, 3));
assertEquals(1, matcher.isMatch(BUFFER2, 4));
assertEquals(0, matcher.isMatch(BUFFER2, 5));
assertSame(StrMatcher.noneMatcher(), StrMatcher.charSetMatcher());
assertSame(StrMatcher.noneMatcher(), StrMatcher.charSetMatcher((char[]) null));
assertInstanceOf(StrMatcher.CharMatcher.class, StrMatcher.charSetMatcher("a".toCharArray()));
}
@Test
void testCharSetMatcher_String() {
final StrMatcher matcher = StrMatcher.charSetMatcher("ace");
assertEquals(1, matcher.isMatch(BUFFER2, 0));
assertEquals(0, matcher.isMatch(BUFFER2, 1));
assertEquals(1, matcher.isMatch(BUFFER2, 2));
assertEquals(0, matcher.isMatch(BUFFER2, 3));
assertEquals(1, matcher.isMatch(BUFFER2, 4));
assertEquals(0, matcher.isMatch(BUFFER2, 5));
assertSame(StrMatcher.noneMatcher(), StrMatcher.charSetMatcher(""));
assertSame(StrMatcher.noneMatcher(), StrMatcher.charSetMatcher((String) null));
assertInstanceOf(StrMatcher.CharMatcher.class, StrMatcher.charSetMatcher("a"));
}
@Test
void testCommaMatcher() {
final StrMatcher matcher = StrMatcher.commaMatcher();
assertSame(matcher, StrMatcher.commaMatcher());
assertEquals(0, matcher.isMatch(BUFFER1, 0));
assertEquals(1, matcher.isMatch(BUFFER1, 1));
assertEquals(0, matcher.isMatch(BUFFER1, 2));
}
@Test
void testDoubleQuoteMatcher() {
final StrMatcher matcher = StrMatcher.doubleQuoteMatcher();
assertSame(matcher, StrMatcher.doubleQuoteMatcher());
assertEquals(0, matcher.isMatch(BUFFER1, 11));
assertEquals(1, matcher.isMatch(BUFFER1, 12));
}
@Test
void testMatcherIndices() {
// remember that the API contract is tight for the isMatch() method
// all the onus is on the caller, so invalid inputs are not
// the concern of StrMatcher, and are not bugs
final StrMatcher matcher = StrMatcher.stringMatcher("bc");
assertEquals(2, matcher.isMatch(BUFFER2, 1, 1, BUFFER2.length));
assertEquals(2, matcher.isMatch(BUFFER2, 1, 0, 3));
assertEquals(0, matcher.isMatch(BUFFER2, 1, 0, 2));
}
@Test
void testNoneMatcher() {
final StrMatcher matcher = StrMatcher.noneMatcher();
assertSame(matcher, StrMatcher.noneMatcher());
assertEquals(0, matcher.isMatch(BUFFER1, 0));
assertEquals(0, matcher.isMatch(BUFFER1, 1));
assertEquals(0, matcher.isMatch(BUFFER1, 2));
assertEquals(0, matcher.isMatch(BUFFER1, 3));
assertEquals(0, matcher.isMatch(BUFFER1, 4));
assertEquals(0, matcher.isMatch(BUFFER1, 5));
assertEquals(0, matcher.isMatch(BUFFER1, 6));
assertEquals(0, matcher.isMatch(BUFFER1, 7));
assertEquals(0, matcher.isMatch(BUFFER1, 8));
assertEquals(0, matcher.isMatch(BUFFER1, 9));
assertEquals(0, matcher.isMatch(BUFFER1, 10));
assertEquals(0, matcher.isMatch(BUFFER1, 11));
assertEquals(0, matcher.isMatch(BUFFER1, 12));
}
@Test
void testQuoteMatcher() {
final StrMatcher matcher = StrMatcher.quoteMatcher();
assertSame(matcher, StrMatcher.quoteMatcher());
assertEquals(0, matcher.isMatch(BUFFER1, 10));
assertEquals(1, matcher.isMatch(BUFFER1, 11));
assertEquals(1, matcher.isMatch(BUFFER1, 12));
}
@Test
void testSingleQuoteMatcher() {
final StrMatcher matcher = StrMatcher.singleQuoteMatcher();
assertSame(matcher, StrMatcher.singleQuoteMatcher());
assertEquals(0, matcher.isMatch(BUFFER1, 10));
assertEquals(1, matcher.isMatch(BUFFER1, 11));
assertEquals(0, matcher.isMatch(BUFFER1, 12));
}
@Test
void testSpaceMatcher() {
final StrMatcher matcher = StrMatcher.spaceMatcher();
assertSame(matcher, StrMatcher.spaceMatcher());
assertEquals(0, matcher.isMatch(BUFFER1, 4));
assertEquals(1, matcher.isMatch(BUFFER1, 5));
assertEquals(0, matcher.isMatch(BUFFER1, 6));
}
@Test
void testSplitMatcher() {
final StrMatcher matcher = StrMatcher.splitMatcher();
assertSame(matcher, StrMatcher.splitMatcher());
assertEquals(0, matcher.isMatch(BUFFER1, 2));
assertEquals(1, matcher.isMatch(BUFFER1, 3));
assertEquals(0, matcher.isMatch(BUFFER1, 4));
assertEquals(1, matcher.isMatch(BUFFER1, 5));
assertEquals(0, matcher.isMatch(BUFFER1, 6));
assertEquals(1, matcher.isMatch(BUFFER1, 7));
assertEquals(1, matcher.isMatch(BUFFER1, 8));
assertEquals(1, matcher.isMatch(BUFFER1, 9));
assertEquals(0, matcher.isMatch(BUFFER1, 10));
}
@Test
void testStringMatcher_String() {
final StrMatcher matcher = StrMatcher.stringMatcher("bc");
assertEquals(0, matcher.isMatch(BUFFER2, 0));
assertEquals(2, matcher.isMatch(BUFFER2, 1));
assertEquals(0, matcher.isMatch(BUFFER2, 2));
assertEquals(0, matcher.isMatch(BUFFER2, 3));
assertEquals(0, matcher.isMatch(BUFFER2, 4));
assertEquals(0, matcher.isMatch(BUFFER2, 5));
assertSame(StrMatcher.noneMatcher(), StrMatcher.stringMatcher(""));
assertSame(StrMatcher.noneMatcher(), StrMatcher.stringMatcher(null));
}
@Test
void testTabMatcher() {
final StrMatcher matcher = StrMatcher.tabMatcher();
assertSame(matcher, StrMatcher.tabMatcher());
assertEquals(0, matcher.isMatch(BUFFER1, 2));
assertEquals(1, matcher.isMatch(BUFFER1, 3));
assertEquals(0, matcher.isMatch(BUFFER1, 4));
}
@Test
void testTrimMatcher() {
final StrMatcher matcher = StrMatcher.trimMatcher();
assertSame(matcher, StrMatcher.trimMatcher());
assertEquals(0, matcher.isMatch(BUFFER1, 2));
assertEquals(1, matcher.isMatch(BUFFER1, 3));
assertEquals(0, matcher.isMatch(BUFFER1, 4));
assertEquals(1, matcher.isMatch(BUFFER1, 5));
assertEquals(0, matcher.isMatch(BUFFER1, 6));
assertEquals(1, matcher.isMatch(BUFFER1, 7));
assertEquals(1, matcher.isMatch(BUFFER1, 8));
assertEquals(1, matcher.isMatch(BUFFER1, 9));
assertEquals(1, matcher.isMatch(BUFFER1, 10));
}
}
| StrMatcherTest |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/BasicTypeSerializerUpgradeTestSpecifications.java | {
"start": 31368,
"end": 32416
} | class ____
implements TypeSerializerUpgradeTestBase.UpgradeVerifier<java.sql.Date> {
@Override
public TypeSerializer<java.sql.Date> createUpgradedSerializer() {
return SqlDateSerializer.INSTANCE;
}
@Override
public Condition<java.sql.Date> testDataCondition() {
return new Condition<>(
value -> value.equals(new java.sql.Date(1580382960L)), "value is 1580382960L");
}
@Override
public Condition<TypeSerializerSchemaCompatibility<java.sql.Date>>
schemaCompatibilityCondition(FlinkVersion version) {
return TypeSerializerConditions.isCompatibleAsIs();
}
}
// ----------------------------------------------------------------------------------------------
// Specification for "sql-time-serializer"
// ----------------------------------------------------------------------------------------------
/** SqlTimeSerializerSetup. */
public static final | SqlDateSerializerVerifier |
java | apache__maven | api/maven-api-core/src/main/java/org/apache/maven/api/services/SettingsBuilderRequest.java | {
"start": 6494,
"end": 9627
} | class ____ extends BaseRequest<ProtoSession>
implements SettingsBuilderRequest {
private final Source installationSettingsSource;
private final Source projectSettingsSource;
private final Source userSettingsSource;
private final UnaryOperator<String> interpolationSource;
@SuppressWarnings("checkstyle:ParameterNumber")
DefaultSettingsBuilderRequest(
@Nonnull ProtoSession session,
@Nullable RequestTrace trace,
@Nullable Source installationSettingsSource,
@Nullable Source projectSettingsSource,
@Nullable Source userSettingsSource,
@Nullable UnaryOperator<String> interpolationSource) {
super(session, trace);
this.installationSettingsSource = installationSettingsSource;
this.projectSettingsSource = projectSettingsSource;
this.userSettingsSource = userSettingsSource;
this.interpolationSource = interpolationSource;
}
@Nonnull
@Override
public Optional<Source> getInstallationSettingsSource() {
return Optional.ofNullable(installationSettingsSource);
}
@Nonnull
@Override
public Optional<Source> getProjectSettingsSource() {
return Optional.ofNullable(projectSettingsSource);
}
@Nonnull
@Override
public Optional<Source> getUserSettingsSource() {
return Optional.ofNullable(userSettingsSource);
}
@Nonnull
@Override
public Optional<UnaryOperator<String>> getInterpolationSource() {
return Optional.ofNullable(interpolationSource);
}
@Override
public boolean equals(Object o) {
return o instanceof DefaultSettingsBuilderRequest that
&& Objects.equals(installationSettingsSource, that.installationSettingsSource)
&& Objects.equals(projectSettingsSource, that.projectSettingsSource)
&& Objects.equals(userSettingsSource, that.userSettingsSource)
&& Objects.equals(interpolationSource, that.interpolationSource);
}
@Override
public int hashCode() {
return Objects.hash(
installationSettingsSource, projectSettingsSource, userSettingsSource, interpolationSource);
}
@Override
public String toString() {
return "SettingsBuilderRequest[" + "installationSettingsSource="
+ installationSettingsSource + ", projectSettingsSource="
+ projectSettingsSource + ", userSettingsSource="
+ userSettingsSource + ", interpolationSource="
+ interpolationSource + ']';
}
}
}
}
| DefaultSettingsBuilderRequest |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/context/exceptions/BeanDestructionException.java | {
"start": 851,
"end": 1218
} | class ____ extends BeanContextException {
/**
* @param beanType The bean type
* @param cause The throwable
*/
public BeanDestructionException(@NonNull BeanType<?> beanType, @NonNull Throwable cause) {
super("Error destroying bean of type [" + beanType.getBeanType() + "]: " + cause.getMessage(), cause);
}
}
| BeanDestructionException |
java | apache__flink | flink-architecture-tests/flink-architecture-tests-test/src/main/java/org/apache/flink/architecture/rules/ITCaseRules.java | {
"start": 2289,
"end": 11398
} | class ____ {
private static final String ABSTRACT_TEST_BASE_FQ =
"org.apache.flink.test.util.AbstractTestBase";
private static final String INTERNAL_MINI_CLUSTER_EXTENSION_FQ_NAME =
"org.apache.flink.runtime.testutils.InternalMiniClusterExtension";
private static final String MINI_CLUSTER_EXTENSION_FQ_NAME =
"org.apache.flink.test.junit5.MiniClusterExtension";
private static final String MINI_CLUSTER_TEST_ENVIRONMENT_FQ_NAME =
"org.apache.flink.connector.testframe.environment.MiniClusterTestEnvironment";
private static final String TEST_ENV_FQ_NAME =
"org.apache.flink.connector.testframe.junit.annotations.TestEnv";
@ArchTest
public static final ArchRule INTEGRATION_TEST_ENDING_WITH_ITCASE =
freeze(
javaClassesThat()
.areAssignableTo(
DescribedPredicate.describe(
"is assignable to " + ABSTRACT_TEST_BASE_FQ,
javaClass ->
javaClass
.getName()
.equals(ABSTRACT_TEST_BASE_FQ)))
.and()
.doNotHaveModifier(ABSTRACT)
.should()
.haveSimpleNameEndingWith("ITCase"))
// FALSE by default since 0.23.0 however not every module has inheritors of
// AbstractTestBase
.allowEmptyShould(true)
.as(
"Tests inheriting from AbstractTestBase should have name ending with ITCase");
/**
* In order to pass this check, IT cases must fulfill at least one of the following conditions.
*
* <p>1. For JUnit 5 test, both fields are required like:
*
* <pre>{@code
* @RegisterExtension
* public static final MiniClusterExtension MINI_CLUSTER_RESOURCE =
* new MiniClusterExtension(
* new MiniClusterResourceConfiguration.Builder()
* .setConfiguration(getFlinkConfiguration())
* .build());
* }</pre>
*
* <p>2. For JUnit 5 test, use {@link ExtendWith}:
*
* <pre>{@code
* @ExtendWith(MiniClusterExtension.class)
* }</pre>
*
* <p>3. For JUnit 4 test via @Rule like:
*
* <pre>{@code
* @Rule
* public final MiniClusterWithClientResource miniClusterResource =
* new MiniClusterWithClientResource(
* new MiniClusterResourceConfiguration.Builder()
* .setNumberTaskManagers(1)
* .setNumberSlotsPerTaskManager(PARALLELISM)
* .setRpcServiceSharing(RpcServiceSharing.DEDICATED)
* .withHaLeadershipControl()
* .build());
* }</pre>
*
* <p>4. For JUnit 4 test via @ClassRule like:
*
* <pre>{@code
* @ClassRule
* public static final MiniClusterWithClientResource MINI_CLUSTER =
* new MiniClusterWithClientResource(
* new MiniClusterResourceConfiguration.Builder()
* .setConfiguration(new Configuration())
* .build());
* }</pre>
*/
@ArchTest
public static final ArchRule ITCASE_USE_MINICLUSTER =
freeze(
javaClassesThat()
.haveSimpleNameEndingWith("ITCase")
.and()
.areTopLevelClasses()
.and()
.doNotHaveModifier(ABSTRACT)
.should(
fulfill(
// JUnit 5 violation check
miniClusterExtensionRule()
// JUnit 4 violation check, which should
// be
// removed
// after the JUnit 4->5 migration is
// closed.
// Please refer to FLINK-25858.
.or(
miniClusterWithClientResourceClassRule())
.or(
miniClusterWithClientResourceRule()))))
// FALSE by default since 0.23.0 however not every module has *ITCase tests
.allowEmptyShould(true)
.as("ITCASE tests should use a MiniCluster resource or extension");
private static DescribedPredicate<JavaClass> miniClusterWithClientResourceClassRule() {
return containAnyFieldsInClassHierarchyThat(
arePublicStaticFinalOfTypeWithAnnotation(
"org.apache.flink.test.util.MiniClusterWithClientResource",
ClassRule.class));
}
private static DescribedPredicate<JavaClass> miniClusterWithClientResourceRule() {
return containAnyFieldsInClassHierarchyThat(
arePublicFinalOfTypeWithAnnotation(
"org.apache.flink.test.util.MiniClusterWithClientResource", Rule.class));
}
private static DescribedPredicate<JavaClass> inFlinkRuntimePackages() {
return JavaClass.Predicates.resideInAPackage("org.apache.flink.runtime.*");
}
private static DescribedPredicate<JavaClass> outsideFlinkRuntimePackages() {
return JavaClass.Predicates.resideOutsideOfPackage("org.apache.flink.runtime.*");
}
private static DescribedPredicate<JavaClass> miniClusterExtensionRule() {
// Only flink-runtime should use InternalMiniClusterExtension,
// other packages should use MiniClusterExtension
return Predicates.exactlyOneOf(
inFlinkRuntimePackages()
.and(
containAnyFieldsInClassHierarchyThat(
areStaticFinalOfTypeWithAnnotation(
INTERNAL_MINI_CLUSTER_EXTENSION_FQ_NAME,
RegisterExtension.class))),
outsideFlinkRuntimePackages()
.and(
containAnyFieldsInClassHierarchyThat(
areStaticFinalOfTypeWithAnnotation(
MINI_CLUSTER_EXTENSION_FQ_NAME,
RegisterExtension.class)
.or(
areFieldOfType(
MINI_CLUSTER_TEST_ENVIRONMENT_FQ_NAME)
.and(
annotatedWith(
TEST_ENV_FQ_NAME))))),
inFlinkRuntimePackages()
.and(
isAnnotatedWithExtendWithUsingExtension(
INTERNAL_MINI_CLUSTER_EXTENSION_FQ_NAME)),
outsideFlinkRuntimePackages()
.and(
isAnnotatedWithExtendWithUsingExtension(
MINI_CLUSTER_EXTENSION_FQ_NAME)));
}
private static DescribedPredicate<JavaClass> isAnnotatedWithExtendWithUsingExtension(
String extensionClassFqName) {
return DescribedPredicate.describe(
"is annotated with @ExtendWith with class "
+ getClassSimpleNameFromFqName(extensionClassFqName),
clazz ->
clazz.isAnnotatedWith(ExtendWith.class)
&& Arrays.stream(
clazz.getAnnotationOfType(ExtendWith.class).value())
.map(Class::getCanonicalName)
.anyMatch(s -> s.equals(extensionClassFqName)));
}
}
| ITCaseRules |
java | netty__netty | handler/src/main/java/io/netty/handler/ssl/OpenSslAsyncPrivateKeyMethod.java | {
"start": 803,
"end": 3052
} | interface ____ {
int SSL_SIGN_RSA_PKCS1_SHA1 = SSLPrivateKeyMethod.SSL_SIGN_RSA_PKCS1_SHA1;
int SSL_SIGN_RSA_PKCS1_SHA256 = SSLPrivateKeyMethod.SSL_SIGN_RSA_PKCS1_SHA256;
int SSL_SIGN_RSA_PKCS1_SHA384 = SSLPrivateKeyMethod.SSL_SIGN_RSA_PKCS1_SHA384;
int SSL_SIGN_RSA_PKCS1_SHA512 = SSLPrivateKeyMethod.SSL_SIGN_RSA_PKCS1_SHA512;
int SSL_SIGN_ECDSA_SHA1 = SSLPrivateKeyMethod.SSL_SIGN_ECDSA_SHA1;
int SSL_SIGN_ECDSA_SECP256R1_SHA256 = SSLPrivateKeyMethod.SSL_SIGN_ECDSA_SECP256R1_SHA256;
int SSL_SIGN_ECDSA_SECP384R1_SHA384 = SSLPrivateKeyMethod.SSL_SIGN_ECDSA_SECP384R1_SHA384;
int SSL_SIGN_ECDSA_SECP521R1_SHA512 = SSLPrivateKeyMethod.SSL_SIGN_ECDSA_SECP521R1_SHA512;
int SSL_SIGN_RSA_PSS_RSAE_SHA256 = SSLPrivateKeyMethod.SSL_SIGN_RSA_PSS_RSAE_SHA256;
int SSL_SIGN_RSA_PSS_RSAE_SHA384 = SSLPrivateKeyMethod.SSL_SIGN_RSA_PSS_RSAE_SHA384;
int SSL_SIGN_RSA_PSS_RSAE_SHA512 = SSLPrivateKeyMethod.SSL_SIGN_RSA_PSS_RSAE_SHA512;
int SSL_SIGN_ED25519 = SSLPrivateKeyMethod.SSL_SIGN_ED25519;
int SSL_SIGN_RSA_PKCS1_MD5_SHA1 = SSLPrivateKeyMethod.SSL_SIGN_RSA_PKCS1_MD5_SHA1;
/**
* Signs the input with the given key and notifies the returned {@link Future} with the signed bytes.
*
* @param engine the {@link SSLEngine}
* @param signatureAlgorithm the algorithm to use for signing
* @param input the digest itself
* @return the {@link Future} that will be notified with the signed data
* (must not be {@code null}) when the operation completes.
*/
Future<byte[]> sign(SSLEngine engine, int signatureAlgorithm, byte[] input);
/**
* Decrypts the input with the given key and notifies the returned {@link Future} with the decrypted bytes.
*
* @param engine the {@link SSLEngine}
* @param input the input which should be decrypted
* @return the {@link Future} that will be notified with the decrypted data
* (must not be {@code null}) when the operation completes.
*/
Future<byte[]> decrypt(SSLEngine engine, byte[] input);
}
| OpenSslAsyncPrivateKeyMethod |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Montgomery.java | {
"start": 1980,
"end": 2496
} | class ____ {
private final LongLong x = new LongLong();
private final LongLong xN_I = new LongLong();
private final LongLong aN = new LongLong();
long m(final long c, final long d) {
LongLong.multiplication(x, c, d);
// a = (x * N')&(R - 1) = ((x & R_1) * N') & R_1
final long a = LongLong.multiplication(xN_I, x.and(R_1), N_I).and(R_1);
LongLong.multiplication(aN, a, N);
final long z = aN.plusEqual(x).shiftRight(s);
return z < N? z: z - N;
}
}
} | Product |
java | processing__processing4 | java/src/processing/mode/java/ErrorChecker.java | {
"start": 1657,
"end": 11560
} | class ____ {
// Delay delivering error check result after last sketch change
// https://github.com/processing/processing/issues/2677
private final static long DELAY_BEFORE_UPDATE = 650;
final private ScheduledExecutorService scheduler;
private volatile ScheduledFuture<?> scheduledUiUpdate = null;
private volatile long nextUiUpdate = 0;
private volatile boolean enabled;
private final Consumer<PreprocSketch> errorHandlerListener =
this::handleSketchProblems;
final private Consumer<List<Problem>> editor;
final private PreprocService pps;
public ErrorChecker(Consumer<List<Problem>> editor, PreprocService pps) {
this.editor = editor;
this.pps = pps;
scheduler = Executors.newSingleThreadScheduledExecutor();
enabled = JavaMode.errorCheckEnabled;
if (enabled) {
pps.registerListener(errorHandlerListener);
}
}
public void notifySketchChanged() {
nextUiUpdate = System.currentTimeMillis() + DELAY_BEFORE_UPDATE;
}
public void preferencesChanged() {
if (enabled != JavaMode.errorCheckEnabled) {
enabled = JavaMode.errorCheckEnabled;
if (enabled) {
pps.registerListener(errorHandlerListener);
} else {
pps.unregisterListener(errorHandlerListener);
editor.accept(Collections.emptyList());
nextUiUpdate = 0;
}
}
}
public void dispose() {
if (scheduler != null) {
scheduler.shutdownNow();
}
}
private void handleSketchProblems(PreprocSketch ps) {
Map<String, String[]> suggestCache =
JavaMode.importSuggestEnabled ? new HashMap<>() : Collections.emptyMap();
List<IProblem> iproblems;
if (ps.compilationUnit == null) {
iproblems = new ArrayList<>();
} else {
iproblems = ps.iproblems;
}
final List<Problem> problems = new ArrayList<>(ps.otherProblems);
if (problems.isEmpty()) { // Check for curly quotes
List<JavaProblem> curlyQuoteProblems = checkForCurlyQuotes(ps);
problems.addAll(curlyQuoteProblems);
}
if (problems.isEmpty()) {
AtomicReference<ClassPath> searchClassPath =
new AtomicReference<>(null);
List<Problem> cuProblems = iproblems.stream()
// Filter Warnings if they are not enabled
.filter(iproblem -> !(iproblem.isWarning() && !JavaMode.warningsEnabled))
.filter(iproblem -> !(isIgnorableProblem(iproblem)))
// Transform into our Problems
.map(iproblem -> {
JavaProblem p = convertIProblem(iproblem, ps);
// Handle import suggestions
if (p != null && JavaMode.importSuggestEnabled && isUndefinedTypeProblem(iproblem)) {
ClassPath cp = searchClassPath.updateAndGet(prev -> prev != null ?
prev : new ClassPathFactory().createFromPaths(ps.searchClassPathArray));
String[] s = suggestCache.computeIfAbsent(iproblem.getArguments()[0],
name -> getImportSuggestions(cp, name));
p.setImportSuggestions(s);
}
return p;
})
.filter(Objects::nonNull)
.collect(Collectors.toList());
problems.addAll(cuProblems);
}
if (scheduledUiUpdate != null) {
scheduledUiUpdate.cancel(true);
}
// https://github.com/processing/processing/issues/2677
long delay = nextUiUpdate - System.currentTimeMillis();
Runnable uiUpdater = () -> {
if (nextUiUpdate > 0 && System.currentTimeMillis() >= nextUiUpdate) {
EventQueue.invokeLater(() -> editor.accept(problems));
}
};
scheduledUiUpdate =
scheduler.schedule(uiUpdater, delay, TimeUnit.MILLISECONDS);
}
/**
* Determine if a problem can be suppressed from the user.
*
* <p>
* Determine if one can ignore an errors where an ignorable error is one
* "fixed" in later pipeline steps but can make JDT angry or do not actually
* cause issues when reaching javac.
* </p>
*
* @return True if ignorable and false otherwise.
*/
static private boolean isIgnorableProblem(IProblem iproblem) {
String message = iproblem.getMessage();
// Hide a useless error which is produced when a line ends with
// an identifier without a semicolon. "Missing a semicolon" is
// also produced and is preferred over this one.
// (Syntax error, insert ":: IdentifierOrNew" to complete Expression)
// See: https://bugs.eclipse.org/bugs/show_bug.cgi?id=405780
boolean ignorable =
message.contains("Syntax error, insert \":: IdentifierOrNew\"");
// It's ok if the file names do not line up during preprocessing.
ignorable |= message.contains("must be defined in its own file");
return ignorable;
}
static private JavaProblem convertIProblem(IProblem iproblem, PreprocSketch ps) {
String originalFileName = new String(iproblem.getOriginatingFileName());
boolean isJavaTab = ps.isJavaTab(originalFileName);
// Java tabs' content isn't stored in a sketch's combined source code file,
// so they are processed differently
if (!isJavaTab) {
SketchInterval in = ps.mapJavaToSketch(iproblem);
if (in != SketchInterval.BEFORE_START) {
String badCode = ps.getPdeCode(in);
int line = ps.tabOffsetToTabLine(in.tabIndex, in.startTabOffset);
JavaProblem p = JavaProblem.fromIProblem(iproblem, in.tabIndex, line, badCode);
p.setPDEOffsets(0, -1);
return p;
}
} else {
int tabIndex = ps.getJavaTabIndex(originalFileName);
int line = iproblem.getSourceLineNumber() - 1;
JavaProblem p = JavaProblem.fromIProblem(iproblem, tabIndex, line, "");
p.setPDEOffsets(0, -1);
return p;
}
return null;
}
static private boolean isUndefinedTypeProblem(IProblem iproblem) {
int id = iproblem.getID();
return id == IProblem.UndefinedType ||
id == IProblem.UndefinedName ||
id == IProblem.UnresolvedVariable;
}
static private boolean isMissingBraceProblem(IProblem iproblem) {
if (iproblem.getID() == IProblem.ParsingErrorInsertToComplete) {
char brace = iproblem.getArguments()[0].charAt(0);
return brace == '{' || brace == '}';
} else if (iproblem.getID() == IProblem.ParsingErrorInsertTokenAfter) {
char brace = iproblem.getArguments()[1].charAt(0);
return brace == '{' || brace == '}';
}
return false;
}
static private final Pattern CURLY_QUOTE_REGEX =
Pattern.compile("([“”‘’])", Pattern.UNICODE_CHARACTER_CLASS);
/**
* Check the scrubbed code for curly quotes.
* They are a common copy/paste error, especially on macOS.
*/
static private List<JavaProblem> checkForCurlyQuotes(PreprocSketch ps) {
if (ps.compilationUnit == null) {
return new ArrayList<>();
}
List<JavaProblem> problems = new ArrayList<>(0);
Matcher matcher = CURLY_QUOTE_REGEX.matcher(ps.scrubbedPdeCode);
while (matcher.find()) {
int pdeOffset = matcher.start();
String q = matcher.group();
int tabIndex = ps.pdeOffsetToTabIndex(pdeOffset);
int tabOffset = ps.pdeOffsetToTabOffset(tabIndex, pdeOffset);
int tabLine = ps.tabOffsetToTabLine(tabIndex, tabOffset);
String message = Language.interpolate("editor.status.bad_curly_quote", q);
JavaProblem problem = new JavaProblem(message, JavaProblem.ERROR, tabIndex, tabLine);
problems.add(problem);
}
// Go through iproblems and look for problems involving curly quotes
List<JavaProblem> problems2 = new ArrayList<>(0);
IProblem[] iproblems = ps.compilationUnit.getProblems();
for (IProblem iproblem : iproblems) {
switch (iproblem.getID()) {
case IProblem.ParsingErrorDeleteToken,
IProblem.ParsingErrorDeleteTokens,
IProblem.ParsingErrorInvalidToken,
IProblem.ParsingErrorReplaceTokens,
IProblem.UnterminatedString -> {
SketchInterval in = ps.mapJavaToSketch(iproblem);
if (in == SketchInterval.BEFORE_START) continue;
String badCode = ps.getPdeCode(in);
matcher.reset(badCode);
while (matcher.find()) {
int offset = matcher.start();
String q = matcher.group();
int tabStart = in.startTabOffset + offset;
int tabStop = tabStart + 1;
int line = ps.tabOffsetToTabLine(in.tabIndex, tabStart);
// Prevent duplicate problems
boolean isDupe = problems.stream()
.filter(p -> p.getTabIndex() == in.tabIndex)
.filter(p -> p.getLineNumber() == line)
.findAny()
.isPresent();
if (isDupe) {
String message;
if (iproblem.getID() == IProblem.UnterminatedString) {
message = Language.interpolate("editor.status.unterm_string_curly", q);
} else {
message = Language.interpolate("editor.status.bad_curly_quote", q);
}
JavaProblem p = new JavaProblem(message, JavaProblem.ERROR, in.tabIndex, line);
problems2.add(p);
}
}
}
}
}
problems.addAll(problems2);
return problems;
}
static public String[] getImportSuggestions(ClassPath cp, String className) {
className = className.replace("[", "\\[").replace("]", "\\]");
RegExpResourceFilter filter = new RegExpResourceFilter(
Pattern.compile(".*"),
Pattern.compile("(.*\\$)?" + className + "\\.class",
Pattern.CASE_INSENSITIVE));
String[] resources = cp.findResources("", filter);
return Arrays.stream(resources)
// remove ".class" suffix
.map(res -> res.substring(0, res.length() - 6))
// replace path separators with dots
.map(res -> res.replace('/', '.'))
// replace inner | ErrorChecker |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/runtime/metrics/SystemResourcesMetricsITCase.java | {
"start": 2236,
"end": 4968
} | class ____ {
@RegisterExtension
@Order(1)
static final ContextClassLoaderExtension CONTEXT_CLASS_LOADER_EXTENSION =
ContextClassLoaderExtension.builder()
.withServiceEntry(
MetricReporterFactory.class,
SystemResourcesMetricsITCase.TestReporter.class.getName())
.build();
@RegisterExtension
@Order(2)
static final MiniClusterExtension MINI_CLUSTER_RESOURCE =
new MiniClusterExtension(
new MiniClusterResourceConfiguration.Builder()
.setConfiguration(getConfiguration())
.setNumberTaskManagers(1)
.setNumberSlotsPerTaskManager(1)
.build());
private static Configuration getConfiguration() {
Configuration configuration = new Configuration();
configuration.set(SYSTEM_RESOURCE_METRICS, true);
configuration.set(REPORTERS_LIST, "test_reporter");
configuration.set(MetricOptions.SCOPE_NAMING_JM, "jobmanager");
configuration.set(MetricOptions.SCOPE_NAMING_TM, "taskmanager");
MetricOptions.forReporter(configuration, "test_reporter")
.set(MetricOptions.REPORTER_FACTORY_CLASS, TestReporter.class.getName());
return configuration;
}
@Test
void startTaskManagerAndCheckForRegisteredSystemMetrics() throws Exception {
assertEquals(1, TestReporter.OPENED_REPORTERS.size());
TestReporter reporter = TestReporter.OPENED_REPORTERS.iterator().next();
reporter.patternsExhaustedFuture.get(10, TimeUnit.SECONDS);
}
private static List<String> getExpectedPatterns() {
String[] expectedGauges = {
"System.CPU.Idle",
"System.CPU.Sys",
"System.CPU.User",
"System.CPU.IOWait",
"System.CPU.Irq",
"System.CPU.SoftIrq",
"System.CPU.Steal",
"System.CPU.Nice",
"System.Memory.Available",
"System.Memory.Total",
"System.Swap.Used",
"System.Swap.Total",
"System.Network.*ReceiveRate",
"System.Network.*SendRate"
};
String[] expectedHosts = {"taskmanager.", "jobmanager."};
List<String> patterns = new ArrayList<>();
for (String expectedHost : expectedHosts) {
for (String expectedGauge : expectedGauges) {
patterns.add(expectedHost + expectedGauge);
}
}
return patterns;
}
/** Test metric reporter that exposes registered metrics. */
public static final | SystemResourcesMetricsITCase |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/api/IndicativeSentencesOnSubClassTestCase.java | {
"start": 495,
"end": 562
} | class ____ {
@Test
void this_is_a_test() {
}
}
}
}
| Level_2 |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/access/UnsupportedEnhancementStrategyTest.java | {
"start": 7113,
"end": 7435
} | class ____ {
@Id
private Long id;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Long get() {
return id;
}
public String getSomething() {
return "something";
}
}
@Entity(name = "ImplicitAccessTypeFieldEntity")
static | ExplicitAccessTypeFieldEntity |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java | {
"start": 1777,
"end": 11142
} | class ____ extends SubjectInheritingThread {
private static final Logger LOG = LoggerFactory.
getLogger(FSPreemptionThread.class);
protected final FSContext context;
private final FairScheduler scheduler;
private final long warnTimeBeforeKill;
private final long delayBeforeNextStarvationCheck;
private final Timer preemptionTimer;
private final Lock schedulerReadLock;
@SuppressWarnings("deprecation")
FSPreemptionThread(FairScheduler scheduler) {
setDaemon(true);
setName("FSPreemptionThread");
this.scheduler = scheduler;
this.context = scheduler.getContext();
FairSchedulerConfiguration fsConf = scheduler.getConf();
context.setPreemptionEnabled();
context.setPreemptionUtilizationThreshold(
fsConf.getPreemptionUtilizationThreshold());
preemptionTimer = new Timer("Preemption Timer", true);
warnTimeBeforeKill = fsConf.getWaitTimeBeforeKill();
long allocDelay = (fsConf.isContinuousSchedulingEnabled()
? 10 * fsConf.getContinuousSchedulingSleepMs() // 10 runs
: 4 * scheduler.getNMHeartbeatInterval()); // 4 heartbeats
delayBeforeNextStarvationCheck = warnTimeBeforeKill + allocDelay +
fsConf.getWaitTimeBeforeNextStarvationCheck();
schedulerReadLock = scheduler.getSchedulerReadLock();
}
@Override
public void work() {
while (!Thread.interrupted()) {
try {
FSAppAttempt starvedApp = context.getStarvedApps().take();
// Hold the scheduler readlock so this is not concurrent with the
// update thread.
schedulerReadLock.lock();
try {
preemptContainers(identifyContainersToPreempt(starvedApp));
} finally {
schedulerReadLock.unlock();
}
starvedApp.preemptionTriggered(delayBeforeNextStarvationCheck);
} catch (InterruptedException e) {
LOG.info("Preemption thread interrupted! Exiting.");
Thread.currentThread().interrupt();
}
}
}
/**
* Given an app, identify containers to preempt to satisfy the app's
* starvation.
*
* Mechanics:
* 1. Fetch all {@link ResourceRequest}s corresponding to the amount of
* starvation.
* 2. For each {@link ResourceRequest}, get the best preemptable containers.
*
* @param starvedApp starved application for which we are identifying
* preemption targets
* @return list of containers to preempt to satisfy starvedApp
*/
private List<RMContainer> identifyContainersToPreempt(
FSAppAttempt starvedApp) {
List<RMContainer> containersToPreempt = new ArrayList<>();
// Iterate through enough RRs to address app's starvation
for (ResourceRequest rr : starvedApp.getStarvedResourceRequests()) {
List<FSSchedulerNode> potentialNodes = scheduler.getNodeTracker()
.getNodesByResourceName(rr.getResourceName());
for (int i = 0; i < rr.getNumContainers(); i++) {
PreemptableContainers bestContainers =
getBestPreemptableContainers(rr, potentialNodes);
if (bestContainers != null) {
List<RMContainer> containers = bestContainers.getAllContainers();
if (containers.size() > 0) {
containersToPreempt.addAll(containers);
// Reserve the containers for the starved app
trackPreemptionsAgainstNode(containers, starvedApp);
// Warn application about containers to be killed
for (RMContainer container : containers) {
FSAppAttempt app = scheduler.getSchedulerApp(
container.getApplicationAttemptId());
LOG.info("Preempting container " + container + " from queue: "
+ (app != null ? app.getQueueName() : "unknown"));
// If the app has unregistered while building the container list
// the app might be null, skip notifying the app
if (app != null) {
app.trackContainerForPreemption(container);
}
}
}
}
}
} // End of iteration over RRs
return containersToPreempt;
}
private PreemptableContainers identifyContainersToPreemptForOneContainer(
List<FSSchedulerNode> potentialNodes, ResourceRequest rr) {
PreemptableContainers bestContainers = null;
int maxAMContainers = Integer.MAX_VALUE;
for (FSSchedulerNode node : potentialNodes) {
PreemptableContainers preemptableContainers =
identifyContainersToPreemptOnNode(
rr.getCapability(), node, maxAMContainers);
if (preemptableContainers != null) {
// This set is better than any previously identified set.
bestContainers = preemptableContainers;
maxAMContainers = bestContainers.numAMContainers;
if (maxAMContainers == 0) {
break;
}
}
}
return bestContainers;
}
/**
* Identify containers to preempt on a given node. Try to find a list with
* least AM containers to avoid preempting AM containers. This method returns
* a non-null set of containers only if the number of AM containers is less
* than maxAMContainers.
*
* @param request resource requested
* @param node the node to check
* @param maxAMContainers max allowed AM containers in the set
* @return list of preemptable containers with fewer AM containers than
* maxAMContainers if such a list exists; null otherwise.
*/
private PreemptableContainers identifyContainersToPreemptOnNode(
Resource request, FSSchedulerNode node, int maxAMContainers) {
PreemptableContainers preemptableContainers =
new PreemptableContainers(maxAMContainers);
// Figure out list of containers to consider
List<RMContainer> containersToCheck =
node.getRunningContainersWithAMsAtTheEnd();
containersToCheck.removeAll(node.getContainersForPreemption());
// Initialize potential with unallocated but not reserved resources
Resource potential = Resources.subtractFromNonNegative(
Resources.clone(node.getUnallocatedResource()),
node.getTotalReserved());
for (RMContainer container : containersToCheck) {
FSAppAttempt app =
scheduler.getSchedulerApp(container.getApplicationAttemptId());
// If the app has unregistered while building the container list the app
// might be null, just skip this container: it should be cleaned up soon
if (app == null) {
LOG.info("Found container " + container + " on node "
+ node.getNodeName() + "without app, skipping preemption");
continue;
}
ApplicationId appId = app.getApplicationId();
if (app.canContainerBePreempted(container,
preemptableContainers.getResourcesToPreemptForApp(appId))) {
// Flag container for preemption
if (!preemptableContainers.addContainer(container, appId)) {
return null;
}
Resources.addTo(potential, container.getAllocatedResource());
}
// Check if we have already identified enough containers
if (Resources.fitsIn(request, potential)) {
return preemptableContainers;
}
}
// Return null if the sum of all preemptable containers' resources
// isn't enough to satisfy the starved request.
return null;
}
private void trackPreemptionsAgainstNode(List<RMContainer> containers,
FSAppAttempt app) {
FSSchedulerNode node = scheduler.getNodeTracker()
.getNode(containers.get(0).getNodeId());
node.addContainersForPreemption(containers, app);
}
private void preemptContainers(List<RMContainer> containers) {
// Schedule timer task to kill containers
preemptionTimer.schedule(
new PreemptContainersTask(containers), warnTimeBeforeKill);
}
/**
* Iterate through matching nodes and identify containers to preempt all on
* one node, also optimizing for least number of AM container preemptions.
* Only nodes that match the locality level specified in the
* {@link ResourceRequest} are considered. However, if this would lead to
* AM preemption, and locality relaxation is allowed, then the search space
* is expanded to the remaining nodes.
*
* @param rr resource request
* @param potentialNodes list of {@link FSSchedulerNode}
* @return the list of best preemptable containers for the resource request
*/
private PreemptableContainers getBestPreemptableContainers(ResourceRequest rr,
List<FSSchedulerNode> potentialNodes) {
PreemptableContainers bestContainers =
identifyContainersToPreemptForOneContainer(potentialNodes, rr);
if (rr.getRelaxLocality()
&& !ResourceRequest.isAnyLocation(rr.getResourceName())
&& bestContainers != null
&& bestContainers.numAMContainers > 0) {
List<FSSchedulerNode> remainingNodes =
scheduler.getNodeTracker().getAllNodes();
remainingNodes.removeAll(potentialNodes);
PreemptableContainers spareContainers =
identifyContainersToPreemptForOneContainer(remainingNodes, rr);
if (spareContainers != null && spareContainers.numAMContainers
< bestContainers.numAMContainers) {
bestContainers = spareContainers;
}
}
return bestContainers;
}
private | FSPreemptionThread |
java | apache__flink | flink-core-api/src/main/java/org/apache/flink/api/connector/dsv2/Sink.java | {
"start": 915,
"end": 972
} | interface ____ DataStream api v2.
*
* <p>Note that this | for |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java | {
"start": 3188,
"end": 3330
} | class ____ start and stop secret manager as part of service
// framework and implement state recovery for secret manager on startup
private | to |
java | apache__camel | test-infra/camel-test-infra-kafka/src/main/java/org/apache/camel/test/infra/kafka/services/ContainerLocalKafkaInfraService.java | {
"start": 1520,
"end": 2579
} | class ____ implements KafkaInfraService, ContainerService<KafkaContainer> {
public static final String KAFKA3_IMAGE_NAME = LocalPropertyResolver.getProperty(
ContainerLocalKafkaInfraService.class,
KafkaProperties.KAFKA3_CONTAINER);
private static final Logger LOG = LoggerFactory.getLogger(ContainerLocalKafkaInfraService.class);
protected KafkaContainer kafka;
public ContainerLocalKafkaInfraService() {
kafka = initContainer();
String name = ContainerEnvironmentUtil.containerName(this.getClass());
if (name != null) {
kafka.withCreateContainerCmdModifier(cmd -> cmd.withName(name));
}
}
public ContainerLocalKafkaInfraService(KafkaContainer kafka) {
this.kafka = kafka;
String name = ContainerEnvironmentUtil.containerName(this.getClass());
if (name != null) {
kafka.withCreateContainerCmdModifier(cmd -> cmd.withName(name));
}
}
protected KafkaContainer initContainer() {
| ContainerLocalKafkaInfraService |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInterface.java | {
"start": 1250,
"end": 1676
} | interface ____ {
/**
* Checks whether the given method name is supported by the server.
* It is assumed that all method names are unique for a protocol.
* @param methodName The name of the method
* @return true if method is supported, otherwise false.
* @throws IOException raised on errors performing I/O.
*/
public boolean isMethodSupported(String methodName) throws IOException;
}
| ProtocolMetaInterface |
java | quarkusio__quarkus | extensions/hibernate-search-orm-elasticsearch/runtime/src/main/java/io/quarkus/hibernate/search/orm/elasticsearch/runtime/HibernateSearchElasticsearchRecorder.java | {
"start": 14498,
"end": 15504
} | class ____
implements HibernateOrmIntegrationRuntimeInitListener {
private HibernateSearchIntegrationRuntimeInitInactiveListener() {
}
@Override
public void contributeRuntimeProperties(BiConsumer<String, Object> propertyCollector) {
// Not strictly necessary since this should be set during static init,
// but let's be on the safe side.
propertyCollector.accept(HibernateOrmMapperSettings.ENABLED, false);
}
@Override
public List<StandardServiceInitiator<?>> contributeServiceInitiators() {
return List.of(
// The service must be initiated even if Hibernate Search is not supposed to start,
// because it's also responsible for determining that Hibernate Search should not start.
new HibernateSearchPreIntegrationService.Initiator());
}
}
private static final | HibernateSearchIntegrationRuntimeInitInactiveListener |
java | spring-projects__spring-boot | module/spring-boot-ldap/src/main/java/org/springframework/boot/ldap/testcontainers/LLdapContainerConnectionDetailsFactory.java | {
"start": 1293,
"end": 1630
} | class ____
extends ContainerConnectionDetailsFactory<LLdapContainer, LdapConnectionDetails> {
@Override
protected LdapConnectionDetails getContainerConnectionDetails(ContainerConnectionSource<LLdapContainer> source) {
return new LLdapContainerConnectionDetails(source);
}
private static final | LLdapContainerConnectionDetailsFactory |
java | elastic__elasticsearch | modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java | {
"start": 129408,
"end": 129985
} | class ____ extends PrimaryContext {
public TerminalNode TRUE() {
return getToken(PainlessParser.TRUE, 0);
}
public TrueContext(PrimaryContext ctx) {
copyFrom(ctx);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if (visitor instanceof PainlessParserVisitor) return ((PainlessParserVisitor<? extends T>) visitor).visitTrue(this);
else return visitor.visitChildren(this);
}
}
@SuppressWarnings("CheckReturnValue")
public static | TrueContext |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/logging/LoggersEndpoint.java | {
"start": 6822,
"end": 7244
} | class ____ extends LoggerLevelsDescriptor {
private final List<String> members;
public GroupLoggerLevelsDescriptor(@Nullable LogLevel configuredLevel, List<String> members) {
super(configuredLevel);
this.members = members;
}
public List<String> getMembers() {
return this.members;
}
}
/**
* Description of levels configured for a given single logger.
*/
public static | GroupLoggerLevelsDescriptor |
java | quarkusio__quarkus | extensions/spring-boot-properties/deployment/src/main/java/io/quarkus/spring/boot/properties/deployment/ConfigurationPropertiesUtil.java | {
"start": 3675,
"end": 8450
} | class ____ the type was encountered
* @param bytecodeCreator Where the bytecode will be generated
* @param config Reference to the MP config object
*/
static ReadOptionalResponse createReadOptionalValueAndConvertIfNeeded(String propertyName, Type resultType,
DotName declaringClass,
BytecodeCreator bytecodeCreator, ResultHandle config) {
ResultHandle optionalValue;
if (isMap(resultType)) {
throw new DeploymentException(
"Using a Map is not supported for classes annotated with '@ConfigProperties'. Consider using https://quarkus.io/guides/config-mappings instead.");
}
if (isCollection(resultType)) {
ResultHandle smallryeConfig = bytecodeCreator.checkCast(config, SmallRyeConfig.class);
Class<?> factoryToUse = DotNames.SET.equals(resultType.name()) ? HashSetFactory.class : ArrayListFactory.class;
ResultHandle collectionFactory = bytecodeCreator.invokeStaticMethod(
MethodDescriptor.ofMethod(factoryToUse, "getInstance", factoryToUse));
optionalValue = bytecodeCreator.invokeVirtualMethod(
MethodDescriptor.ofMethod(SmallRyeConfig.class, "getOptionalValues", Optional.class, String.class,
Class.class, IntFunction.class),
smallryeConfig,
bytecodeCreator.load(propertyName),
bytecodeCreator.loadClassFromTCCL(determineSingleGenericType(resultType, declaringClass).name().toString()),
collectionFactory);
} else {
optionalValue = bytecodeCreator.invokeInterfaceMethod(
MethodDescriptor.ofMethod(Config.class, "getOptionalValue", Optional.class, String.class,
Class.class),
config, bytecodeCreator.load(propertyName),
bytecodeCreator.loadClassFromTCCL(resultType.name().toString()));
}
ResultHandle isPresent = bytecodeCreator
.invokeVirtualMethod(MethodDescriptor.ofMethod(Optional.class, "isPresent", boolean.class), optionalValue);
BranchResult isPresentBranch = bytecodeCreator.ifNonZero(isPresent);
BytecodeCreator isPresentTrue = isPresentBranch.trueBranch();
ResultHandle value = isPresentTrue.invokeVirtualMethod(MethodDescriptor.ofMethod(Optional.class, "get", Object.class),
optionalValue);
return new ReadOptionalResponse(value, isPresentTrue, isPresentBranch.falseBranch());
}
public static boolean isListOfObject(Type type) {
if (type.kind() != Type.Kind.PARAMETERIZED_TYPE) {
return false;
}
ParameterizedType parameterizedType = (ParameterizedType) type;
if (!DotNames.LIST.equals(parameterizedType.name())) {
return false;
}
if (parameterizedType.arguments().size() != 1) {
return false;
}
return !parameterizedType.arguments().get(0).name().toString().startsWith("java");
}
private static boolean isCollection(final Type resultType) {
return DotNames.COLLECTION.equals(resultType.name()) ||
DotNames.LIST.equals(resultType.name()) ||
DotNames.SET.equals(resultType.name());
}
private static boolean isMap(final Type resultType) {
return DotNames.MAP.equals(resultType.name()) ||
DotNames.HASH_MAP.equals(resultType.name());
}
static Type determineSingleGenericType(Type type, DotName declaringClass) {
if (!(type.kind() == Type.Kind.PARAMETERIZED_TYPE)) {
throw new IllegalArgumentException("Type " + type.name().toString() + " which is used in class " + declaringClass
+ " must define a generic argument");
}
ParameterizedType parameterizedType = type.asParameterizedType();
if (parameterizedType.arguments().size() != 1) {
throw new IllegalArgumentException("Type " + type.name().toString() + " which is used in class " + declaringClass
+ " must define a single generic argument");
}
return type.asParameterizedType().arguments().get(0);
}
static void registerImplicitConverter(Type type, BuildProducer<ReflectiveClassBuildItem> reflectiveClasses) {
// We need to register for reflection in case an implicit converter is required.
if (!ConfigBuildStep.isHandledByProducers(type)) {
if (type.kind() != Type.Kind.ARRAY) {
reflectiveClasses
.produce(ReflectiveClassBuildItem.builder(type.name().toString()).methods().build());
}
}
}
static | where |
java | apache__camel | components/camel-pqc/src/test/java/org/apache/camel/component/pqc/PQCSignatureDILITHIUMTest.java | {
"start": 1512,
"end": 3507
} | class ____ extends CamelTestSupport {
@EndpointInject("mock:sign")
protected MockEndpoint resultSign;
@EndpointInject("mock:verify")
protected MockEndpoint resultVerify;
@Produce("direct:sign")
protected ProducerTemplate templateSign;
public PQCSignatureDILITHIUMTest() throws NoSuchAlgorithmException {
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:sign").to("pqc:sign?operation=sign").to("mock:sign").to("pqc:verify?operation=verify")
.to("mock:verify");
}
};
}
@BeforeAll
public static void startup() throws Exception {
Security.addProvider(new BouncyCastleProvider());
Security.addProvider(new BouncyCastlePQCProvider());
}
@Test
void testSignAndVerify() throws Exception {
resultSign.expectedMessageCount(1);
resultVerify.expectedMessageCount(1);
templateSign.sendBody("Hello");
resultSign.assertIsSatisfied();
resultVerify.assertIsSatisfied();
assertTrue(resultVerify.getExchanges().get(0).getMessage().getHeader(PQCConstants.VERIFY, Boolean.class));
}
@BindToRegistry("Keypair")
public KeyPair setKeyPair() throws NoSuchAlgorithmException, NoSuchProviderException, InvalidAlgorithmParameterException {
KeyPairGenerator kpGen = KeyPairGenerator.getInstance(PQCSignatureAlgorithms.DILITHIUM.getAlgorithm(),
PQCSignatureAlgorithms.DILITHIUM.getBcProvider());
kpGen.initialize(DilithiumParameterSpec.dilithium5);
KeyPair kp = kpGen.generateKeyPair();
return kp;
}
@BindToRegistry("Signer")
public Signature getSigner() throws NoSuchAlgorithmException {
Signature mlDsa = Signature.getInstance(PQCSignatureAlgorithms.DILITHIUM.getAlgorithm());
return mlDsa;
}
}
| PQCSignatureDILITHIUMTest |
java | processing__processing4 | core/src/processing/opengl/VertexBuffer.java | {
"start": 1388,
"end": 2667
} | interface ____ Processing and OpenGL.
protected int context; // The context that created this resource.
private GLResourceVertexBuffer glres;
VertexBuffer(PGraphicsOpenGL pg, int target, int ncoords, int esize, int usage) {
this(pg, target, ncoords, esize, usage, false);
}
VertexBuffer(PGraphicsOpenGL pg, int target, int ncoords, int esize, int usage, boolean index) {
pgl = pg.pgl;
context = pgl.createEmptyContext();
this.target = target;
this.ncoords = ncoords;
this.elementSize = esize;
this.index = index;
create();
init(usage);
}
protected void create() {
context = pgl.getCurrentContext();
glres = new GLResourceVertexBuffer(this);
}
protected void init(int usage) {
int size = index ? ncoords * INIT_INDEX_BUFFER_SIZE * elementSize :
ncoords * INIT_VERTEX_BUFFER_SIZE * elementSize;
pgl.bindBuffer(target, glId);
pgl.bufferData(target, size, null, usage);
}
protected void dispose() {
if (glres != null) {
glres.dispose();
glId = 0;
glres = null;
}
}
protected boolean contextIsOutdated() {
boolean outdated = !pgl.contextIsCurrent(context);
if (outdated) {
dispose();
}
return outdated;
}
}
| between |
java | google__guice | extensions/throwingproviders/src/com/google/inject/throwingproviders/ProviderChecker.java | {
"start": 522,
"end": 3253
} | class ____ {
private ProviderChecker() {}
static <P extends CheckedProvider<?>> void checkInterface(
Class<P> interfaceType, Optional<? extends Type> valueType) {
checkArgument(interfaceType.isInterface(), "%s must be an interface", interfaceType.getName());
checkArgument(
interfaceType.getGenericInterfaces().length == 1,
"%s must extend CheckedProvider (and only CheckedProvider)",
interfaceType);
boolean tpMode = interfaceType.getInterfaces()[0] == ThrowingProvider.class;
if (!tpMode) {
checkArgument(
interfaceType.getInterfaces()[0] == CheckedProvider.class,
"%s must extend CheckedProvider (and only CheckedProvider)",
interfaceType);
}
// Ensure that T is parameterized and unconstrained.
ParameterizedType genericThrowingProvider =
(ParameterizedType) interfaceType.getGenericInterfaces()[0];
if (interfaceType.getTypeParameters().length == 1) {
String returnTypeName = interfaceType.getTypeParameters()[0].getName();
Type returnType = genericThrowingProvider.getActualTypeArguments()[0];
checkArgument(
returnType instanceof TypeVariable,
"%s does not properly extend CheckedProvider, the first type parameter of CheckedProvider"
+ " (%s) is not a generic type",
interfaceType,
returnType);
checkArgument(
returnTypeName.equals(((TypeVariable) returnType).getName()),
"The generic type (%s) of %s does not match the generic type of CheckedProvider (%s)",
returnTypeName,
interfaceType,
((TypeVariable) returnType).getName());
} else {
checkArgument(
interfaceType.getTypeParameters().length == 0,
"%s has more than one generic type parameter: %s",
interfaceType,
Arrays.asList(interfaceType.getTypeParameters()));
if (valueType.isPresent()) {
checkArgument(
genericThrowingProvider.getActualTypeArguments()[0].equals(valueType.get()),
"%s expects the value type to be %s, but it was %s",
interfaceType,
genericThrowingProvider.getActualTypeArguments()[0],
valueType.get());
}
}
if (tpMode) { // only validate exception in ThrowingProvider mode.
Type exceptionType = genericThrowingProvider.getActualTypeArguments()[1];
checkArgument(
exceptionType instanceof Class,
"%s has the wrong Exception generic type (%s) when extending CheckedProvider",
interfaceType,
exceptionType);
}
// Skip synthetic/bridge methods because java8 generates
// a default method on the | ProviderChecker |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/inference/strategies/AnyArgumentTypeStrategy.java | {
"start": 1312,
"end": 1999
} | class ____ implements ArgumentTypeStrategy {
@Override
public Optional<DataType> inferArgumentType(
CallContext callContext, int argumentPos, boolean throwOnFailure) {
return Optional.of(callContext.getArgumentDataTypes().get(argumentPos));
}
@Override
public Argument getExpectedArgument(FunctionDefinition functionDefinition, int argumentPos) {
return Argument.ofGroup("ANY");
}
@Override
public boolean equals(Object o) {
return this == o || o instanceof AnyArgumentTypeStrategy;
}
@Override
public int hashCode() {
return AnyArgumentTypeStrategy.class.hashCode();
}
}
| AnyArgumentTypeStrategy |
java | apache__dubbo | dubbo-config/dubbo-config-api/src/test/java/org/apache/dubbo/config/integration/multiple/AbstractStorage.java | {
"start": 951,
"end": 1064
} | class ____ implement the basic methods for {@link Storage}.
*
* @param <T> The type to store
*/
public abstract | to |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/tests/http/HttpClientTimeoutTest.java | {
"start": 1170,
"end": 13067
} | class ____ extends HttpTestBase {
@Test
public void testConnectTimeoutDoesFire() throws Exception {
int timeout = 3000;
server.requestHandler(req -> {
req.response().end();
});
startServer(testAddress);
List<HttpClientRequest> requests = new ArrayList<>();
for (int i = 0;i < 5;i++) {
HttpClientRequest request = client.request(new RequestOptions(requestOptions)).await();
requests.add(request);
}
long now = System.currentTimeMillis();
client.request(new RequestOptions(requestOptions).setConnectTimeout(timeout).setURI("/slow"))
.onComplete(onFailure(err -> {
assertTrue(System.currentTimeMillis() - now >= timeout);
testComplete();
}));
await();
}
@Test
public void testConnectTimeoutDoesNotFire() throws Exception {
int timeout = 3000;
int ratio = 50;
server.requestHandler(req -> {
req.response().end();
});
startServer(testAddress);
List<HttpClientRequest> requests = new ArrayList<>();
for (int i = 0;i < 5;i++) {
HttpClientRequest request = client.request(new RequestOptions(requestOptions)).await();
requests.add(request);
}
vertx.setTimer(timeout * ratio / 100, id -> {
requests.forEach(req -> {
req.send().compose(HttpClientResponse::body);
});
});
long now = System.currentTimeMillis();
client.request(new RequestOptions(requestOptions).setConnectTimeout(timeout).setURI("/slow"))
.onComplete(onSuccess(req -> {
long elapsed = System.currentTimeMillis() - now;
assertTrue(elapsed >= timeout * ratio / 100);
assertTrue(elapsed <= timeout);
testComplete();
}));
await();
}
@Test
public void testTimedOutWaiterDoesNotConnect() throws Exception {
Assume.assumeTrue("Domain socket don't pass this test", testAddress.isInetSocket());
Assume.assumeTrue("HTTP/2 don't pass this test", createBaseClientOptions().getProtocolVersion() == HttpVersion.HTTP_1_1);
long responseDelay = 300;
int requests = 6;
CountDownLatch firstCloseLatch = new CountDownLatch(1);
server.close().onComplete(onSuccess(v -> firstCloseLatch.countDown()));
// Make sure server is closed before continuing
awaitLatch(firstCloseLatch);
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setKeepAlive(false), new PoolOptions().setHttp1MaxSize(1));
AtomicInteger requestCount = new AtomicInteger(0);
// We need a net server because we need to intercept the socket connection, not just full http requests
NetServer server = vertx.createNetServer();
server.connectHandler(socket -> {
Buffer content = Buffer.buffer();
AtomicBoolean closed = new AtomicBoolean();
socket.closeHandler(v -> closed.set(true));
socket.handler(buff -> {
content.appendBuffer(buff);
if (buff.toString().endsWith("\r\n\r\n")) {
// Delay and write a proper http response
vertx.setTimer(responseDelay, time -> {
if (!closed.get()) {
requestCount.incrementAndGet();
socket.write("HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nOK");
}
});
}
});
});
CountDownLatch latch = new CountDownLatch(requests);
server.listen(testAddress).await(20, TimeUnit.SECONDS);
for(int count = 0; count < requests; count++) {
if (count % 2 == 0) {
client.request(requestOptions)
.compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body))
.onComplete(onSuccess(buff -> {
assertEquals("OK", buff.toString());
latch.countDown();
}));
} else {
// Odd requests get a timeout less than the responseDelay, since we have a pool size of one and a delay all but
// the first request should end up in the wait queue, the odd numbered requests should time out so we should get
// (requests + 1 / 2) connect attempts
client
.request(new RequestOptions(requestOptions).setConnectTimeout(responseDelay / 2))
.onComplete(onFailure(err -> {
latch.countDown();
}));
}
}
awaitLatch(latch);
assertEquals("Incorrect number of connect attempts.", (requests + 1) / 2, requestCount.get());
server.close();
}
@Test
public void testRequestTimeoutIsNotDelayedAfterResponseIsReceived() throws Exception {
int n = 6;
waitFor(n);
server.requestHandler(req -> {
req.response().end();
});
startServer(testAddress);
vertx.deployVerticle(new AbstractVerticle() {
@Override
public void start() throws Exception {
client.close();
client = vertx.createHttpClient(createBaseClientOptions(), new PoolOptions().setHttp1MaxSize(1));
for (int i = 0;i < n;i++) {
AtomicBoolean responseReceived = new AtomicBoolean();
client.request(requestOptions).onComplete(onSuccess(req -> {
req.idleTimeout(500);
req.send().onComplete(onSuccess(resp -> {
try {
Thread.sleep(150);
} catch (InterruptedException e) {
fail(e);
}
responseReceived.set(true);
// Complete later, if some timeout tasks have been queued, this will be executed after
vertx.runOnContext(v -> complete());
}));
}));
}
}
}, new DeploymentOptions().setThreadingModel(ThreadingModel.WORKER));
await();
}
@Test
public void testRequestTimeoutCanceledWhenRequestEndsNormally() throws Exception {
server.requestHandler(req -> req.response().end());
startServer(testAddress);
AtomicReference<Throwable> exception = new AtomicReference<>();
client.request(requestOptions).onComplete(onSuccess(req -> {
req
.exceptionHandler(exception::set)
.idleTimeout(500)
.end();
vertx.setTimer(1000, id -> {
assertNull("Did not expect any exception", exception.get());
testComplete();
});
}));
await();
}
@Test
public void testRequestTimeoutCanceledWhenRequestHasAnOtherError() {
Assume.assumeFalse(Utils.isWindows());
AtomicReference<Throwable> exception = new AtomicReference<>();
// There is no server running, should fail to connect
client.request(new RequestOptions().setPort(5000).setIdleTimeout(800))
.onComplete(onFailure(exception::set));
vertx.setTimer(1500, id -> {
assertNotNull("Expected an exception to be set", exception.get());
assertFalse("Expected to not end with timeout exception, but did: " + exception.get(), exception.get() instanceof TimeoutException);
testComplete();
});
await();
}
@Test
public void testHttpClientRequestTimeoutResetsTheConnection() throws Exception {
waitFor(3);
server.requestHandler(req -> {
AtomicBoolean errored = new AtomicBoolean();
req.exceptionHandler(err -> {
if (errored.compareAndSet(false, true)) {
if (req.version() == HttpVersion.HTTP_2) {
StreamResetException reset = (StreamResetException) err;
assertEquals(8, reset.getCode());
}
complete();
}
});
});
startServer(testAddress);
client.request(requestOptions).onComplete(onSuccess(req -> {
req.response().onComplete(onFailure(err -> {
complete();
}));
req.setChunked(true).writeHead().onComplete(onSuccess(version -> req.idleTimeout(500)));
AtomicBoolean errored = new AtomicBoolean();
req.exceptionHandler(err -> {
if (errored.compareAndSet(false, true)) {
complete();
}
});
}));
await();
}
@Test
public void testResponseDataTimeout() throws Exception {
waitFor(2);
Buffer expected = TestUtils.randomBuffer(1000);
server.requestHandler(req -> {
req.response().setChunked(true).write(expected);
});
startServer(testAddress);
Buffer received = Buffer.buffer();
client.request(requestOptions).onComplete(onSuccess(req -> {
req.response().onComplete(onSuccess(resp -> {
AtomicInteger count = new AtomicInteger();
resp.exceptionHandler(t -> {
if (count.getAndIncrement() == 0) {
assertTrue(t instanceof TimeoutException);
assertEquals(expected, received);
complete();
}
});
resp.request().idleTimeout(500);
resp.handler(buff -> {
received.appendBuffer(buff);
// Force the internal timer to be rescheduled with the remaining amount of time
// e.g around 100 ms
try {
Thread.sleep(100);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
});
}));
AtomicInteger count = new AtomicInteger();
req.exceptionHandler(t -> {
if (count.getAndIncrement() == 0) {
assertTrue(t instanceof TimeoutException);
assertEquals(expected, received);
complete();
}
});
req.writeHead();
}));
await();
}
@Test
public void testRequestTimesOutWhenIndicatedPeriodExpiresWithoutAResponseFromRemoteServer() throws Exception {
server.requestHandler(noOpHandler()); // No response handler so timeout triggers
AtomicBoolean failed = new AtomicBoolean();
startServer(testAddress);
client.request(new RequestOptions(requestOptions).setIdleTimeout(1000))
.compose(HttpClientRequest::send).onComplete(onFailure(t -> {
// Catch the first, the second is going to be a connection closed exception when the
// server is shutdown on testComplete
if (failed.compareAndSet(false, true)) {
testComplete();
}
}));
await();
}
@Test
public void testRequestTimeoutExtendedWhenResponseChunksReceived() throws Exception {
long timeout = 2000;
int numChunks = 100;
AtomicInteger count = new AtomicInteger(0);
long interval = timeout * 2 / numChunks;
server.requestHandler(req -> {
req.response().setChunked(true);
vertx.setPeriodic(interval, timerID -> {
req.response().write("foo");
if (count.incrementAndGet() == numChunks) {
req.response().end();
vertx.cancelTimer(timerID);
}
});
});
startServer(testAddress);
client.request(new RequestOptions(requestOptions).setIdleTimeout(timeout))
.compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::end))
.onComplete(onSuccess(v -> testComplete()));
await();
}
@Test
public void testRequestsTimeoutInQueue() throws Exception {
server.requestHandler(req -> {
vertx.setTimer(1000, id -> {
HttpServerResponse resp = req.response();
if (!resp.closed()) {
resp.end();
}
});
});
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setKeepAlive(false), new PoolOptions().setHttp1MaxSize(1));
startServer(testAddress);
// Add a few requests that should all timeout
for (int i = 0; i < 5; i++) {
client.request(new RequestOptions(requestOptions).setIdleTimeout(500))
.compose(HttpClientRequest::send)
.onComplete(onFailure(t -> assertTrue(t instanceof TimeoutException)));
}
// Now another request that should not timeout
client.request(new RequestOptions(requestOptions).setIdleTimeout(3000))
.compose(HttpClientRequest::send)
.onComplete(onSuccess(resp -> {
assertEquals(200, resp.statusCode());
testComplete();
}));
await();
}
}
| HttpClientTimeoutTest |
java | apache__flink | flink-datastream/src/main/java/org/apache/flink/datastream/impl/extension/window/function/InternalTwoOutputWindowStreamProcessFunction.java | {
"start": 1657,
"end": 2022
} | class ____ wrap a {@link TwoOutputWindowStreamProcessFunction} to process function. This will
* be translated to a window operator instead of vanilla process operator.
*
* @param <IN> Type of the input elements.
* @param <OUT1> Type of the first output elements.
* @param <OUT2> Type of the second output elements.
* @param <W> Type of the window.
*/
public | that |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/language/TokenizerExpression.java | {
"start": 6808,
"end": 11285
} | class ____ extends AbstractBuilder<Builder, TokenizerExpression> {
private String token;
private String endToken;
private String inheritNamespaceTagName;
private String regex;
private String xml;
private String includeTokens;
private String group;
private String groupDelimiter;
private String skipFirst;
/**
* The (start) token to use as tokenizer, for example, you can use the new line token. You can use simple
* language as the token to support dynamic tokens.
*/
public Builder token(String token) {
this.token = token;
return this;
}
/**
* The end token to use as tokenizer if using start/end token pairs. You can use simple language as the token to
* support dynamic tokens.
*/
public Builder endToken(String endToken) {
this.endToken = endToken;
return this;
}
/**
* To inherit namespaces from a root/parent tag name when using XML, you can use simple language as the tag name
* to support dynamic names.
*/
public Builder inheritNamespaceTagName(String inheritNamespaceTagName) {
this.inheritNamespaceTagName = inheritNamespaceTagName;
return this;
}
/**
* If the token is a regular expression pattern.
* <p/>
* The default value is false
*/
public Builder regex(String regex) {
this.regex = regex;
return this;
}
/**
* If the token is a regular expression pattern.
* <p/>
* The default value is false
*/
public Builder regex(boolean regex) {
this.regex = Boolean.toString(regex);
return this;
}
/**
* Whether the input is XML messages. This option must be set to true if working with XML payloads.
*/
public Builder xml(String xml) {
this.xml = xml;
return this;
}
/**
* Whether the input is XML messages. This option must be set to true if working with XML payloads.
*/
public Builder xml(boolean xml) {
this.xml = Boolean.toString(xml);
return this;
}
/**
* Whether to include the tokens in the parts when using pairs
* <p/>
* The default value is false
*/
public Builder includeTokens(String includeTokens) {
this.includeTokens = includeTokens;
return this;
}
/**
* Whether to include the tokens in the parts when using pairs
* <p/>
* The default value is false
*/
public Builder includeTokens(boolean includeTokens) {
this.includeTokens = Boolean.toString(includeTokens);
return this;
}
/**
* To group N parts together, for example, to split big files into chunks of 1000 lines. You can use simple
* language as the group to support dynamic group sizes.
*/
public Builder group(String group) {
this.group = group;
return this;
}
/**
* To group N parts together, for example, to split big files into chunks of 1000 lines. You can use simple
* language as the group to support dynamic group sizes.
*/
public Builder group(int group) {
this.group = Integer.toString(group);
return this;
}
/**
* Sets the delimiter to use when grouping. If this has not been set, then the token will be used as the
* delimiter.
*/
public Builder groupDelimiter(String groupDelimiter) {
this.groupDelimiter = groupDelimiter;
return this;
}
/**
* To skip the very first element
*/
public Builder skipFirst(String skipFirst) {
this.skipFirst = skipFirst;
return this;
}
/**
* To skip the very first element
*/
public Builder skipFirst(boolean skipFirst) {
this.skipFirst = Boolean.toString(skipFirst);
return this;
}
@Override
public TokenizerExpression end() {
return new TokenizerExpression(this);
}
}
}
| Builder |
java | apache__kafka | shell/src/main/java/org/apache/kafka/shell/state/MetadataShellPublisher.java | {
"start": 1215,
"end": 1939
} | class ____ implements MetadataPublisher {
private static final Logger log = LoggerFactory.getLogger(MetadataShellPublisher.class);
private final MetadataShellState state;
public MetadataShellPublisher(MetadataShellState state) {
this.state = state;
}
@Override
public String name() {
return "MetadataShellPublisher";
}
@Override
public void onMetadataUpdate(
MetadataDelta delta,
MetadataImage newImage,
LoaderManifest manifest
) {
log.trace("onMetadataUpdate newImage={}", newImage);
state.setRoot(new RootShellNode(newImage));
}
public MetadataShellState state() {
return state;
}
}
| MetadataShellPublisher |
java | apache__spark | sql/catalyst/src/main/java/org/apache/spark/sql/connector/write/WriterCommitMessage.java | {
"start": 1327,
"end": 1471
} | class ____ use it when
* generating messages at executor side and handling the messages at driver side.
*
* @since 3.0.0
*/
@Evolving
public | and |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDTService.java | {
"start": 1230,
"end": 1702
} | class ____ both the delegation binding
* code and the back end service created; allows for
* shared methods across both.
*
* The lifecycle sequence is as follows
* <pre>
* - create
* - bindToFileSystem(uri, ownerFS)
* - init
* - start
* ...api calls...
* - stop
* </pre>
*
* As the S3ADelegation mechanism is all configured during the filesystem
* initalize() operation, it is not ready for use through all the start process.
*/
public abstract | for |
java | google__dagger | javatests/dagger/internal/codegen/ComponentCreatorTest.java | {
"start": 24605,
"end": 25047
} | class ____ {",
" @Provides String s() { return \"\"; }",
"}");
Source componentFile =
javaFileBuilder("test.SimpleComponent")
.addLines(
"package test;",
"",
"import dagger.Component;",
"import javax.inject.Provider;",
"",
"@Component(modules = TestModule.class)",
"abstract | TestModule |
java | grpc__grpc-java | services/src/test/java/io/grpc/protobuf/services/ProtoReflectionServiceV1Test.java | {
"start": 2671,
"end": 26758
} | class ____ {
@Rule
public GrpcCleanupRule grpcCleanupRule = new GrpcCleanupRule();
private static final String TEST_HOST = "localhost";
private MutableHandlerRegistry handlerRegistry = new MutableHandlerRegistry();
private BindableService reflectionService;
private ServerServiceDefinition dynamicService =
new DynamicServiceGrpc.DynamicServiceImplBase() {}.bindService();
private ServerServiceDefinition anotherDynamicService =
new AnotherDynamicServiceGrpc.AnotherDynamicServiceImplBase() {}.bindService();
private ServerReflectionGrpc.ServerReflectionStub stub;
@Before
public void setUp() throws Exception {
reflectionService = ProtoReflectionServiceV1.newInstance();
Server server =
InProcessServerBuilder.forName("proto-reflection-test")
.directExecutor()
.addService(reflectionService)
.addService(new ReflectableServiceGrpc.ReflectableServiceImplBase() {})
.fallbackHandlerRegistry(handlerRegistry)
.build()
.start();
grpcCleanupRule.register(server);
ManagedChannel channel =
grpcCleanupRule.register(
InProcessChannelBuilder.forName("proto-reflection-test").directExecutor().build());
stub = ServerReflectionGrpc.newStub(channel);
}
@Test
public void listServices() throws Exception {
Set<ServiceResponse> originalServices =
new HashSet<>(
Arrays.asList(
ServiceResponse.newBuilder()
.setName("grpc.reflection.v1.ServerReflection")
.build(),
ServiceResponse.newBuilder()
.setName("grpc.reflection.testing.ReflectableService")
.build()));
assertServiceResponseEquals(originalServices);
handlerRegistry.addService(dynamicService);
assertServiceResponseEquals(
new HashSet<>(
Arrays.asList(
ServiceResponse.newBuilder()
.setName("grpc.reflection.v1.ServerReflection")
.build(),
ServiceResponse.newBuilder()
.setName("grpc.reflection.testing.ReflectableService")
.build(),
ServiceResponse.newBuilder()
.setName("grpc.reflection.testing.DynamicService")
.build())));
handlerRegistry.addService(anotherDynamicService);
assertServiceResponseEquals(
new HashSet<>(
Arrays.asList(
ServiceResponse.newBuilder()
.setName("grpc.reflection.v1.ServerReflection")
.build(),
ServiceResponse.newBuilder()
.setName("grpc.reflection.testing.ReflectableService")
.build(),
ServiceResponse.newBuilder()
.setName("grpc.reflection.testing.DynamicService")
.build(),
ServiceResponse.newBuilder()
.setName("grpc.reflection.testing.AnotherDynamicService")
.build())));
handlerRegistry.removeService(dynamicService);
assertServiceResponseEquals(
new HashSet<>(
Arrays.asList(
ServiceResponse.newBuilder()
.setName("grpc.reflection.v1.ServerReflection")
.build(),
ServiceResponse.newBuilder()
.setName("grpc.reflection.testing.ReflectableService")
.build(),
ServiceResponse.newBuilder()
.setName("grpc.reflection.testing.AnotherDynamicService")
.build())));
handlerRegistry.removeService(anotherDynamicService);
assertServiceResponseEquals(originalServices);
}
@Test
public void fileByFilename() throws Exception {
ServerReflectionRequest request =
ServerReflectionRequest.newBuilder()
.setHost(TEST_HOST)
.setFileByFilename("io/grpc/reflection/testing/reflection_test_depth_three.proto")
.build();
ServerReflectionResponse goldenResponse =
ServerReflectionResponse.newBuilder()
.setValidHost(TEST_HOST)
.setOriginalRequest(request)
.setFileDescriptorResponse(
FileDescriptorResponse.newBuilder()
.addFileDescriptorProto(
ReflectionTestDepthThreeProto.getDescriptor().toProto().toByteString())
.build())
.build();
StreamRecorder<ServerReflectionResponse> responseObserver = StreamRecorder.create();
StreamObserver<ServerReflectionRequest> requestObserver =
stub.serverReflectionInfo(responseObserver);
requestObserver.onNext(request);
requestObserver.onCompleted();
assertEquals(goldenResponse, responseObserver.firstValue().get());
}
@Test
public void fileByFilenameConsistentForMutableServices() throws Exception {
ServerReflectionRequest request =
ServerReflectionRequest.newBuilder()
.setHost(TEST_HOST)
.setFileByFilename("io/grpc/reflection/testing/dynamic_reflection_test_depth_two.proto")
.build();
ServerReflectionResponse goldenResponse =
ServerReflectionResponse.newBuilder()
.setValidHost(TEST_HOST)
.setOriginalRequest(request)
.setFileDescriptorResponse(
FileDescriptorResponse.newBuilder()
.addFileDescriptorProto(
DynamicReflectionTestDepthTwoProto.getDescriptor().toProto().toByteString())
.build())
.build();
StreamRecorder<ServerReflectionResponse> responseObserver = StreamRecorder.create();
StreamObserver<ServerReflectionRequest> requestObserver =
stub.serverReflectionInfo(responseObserver);
handlerRegistry.addService(dynamicService);
requestObserver.onNext(request);
requestObserver.onCompleted();
StreamRecorder<ServerReflectionResponse> responseObserver2 = StreamRecorder.create();
StreamObserver<ServerReflectionRequest> requestObserver2 =
stub.serverReflectionInfo(responseObserver2);
handlerRegistry.removeService(dynamicService);
requestObserver2.onNext(request);
requestObserver2.onCompleted();
StreamRecorder<ServerReflectionResponse> responseObserver3 = StreamRecorder.create();
StreamObserver<ServerReflectionRequest> requestObserver3 =
stub.serverReflectionInfo(responseObserver3);
requestObserver3.onNext(request);
requestObserver3.onCompleted();
assertEquals(
ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE,
responseObserver.firstValue().get().getMessageResponseCase());
assertEquals(goldenResponse, responseObserver2.firstValue().get());
assertEquals(
ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE,
responseObserver3.firstValue().get().getMessageResponseCase());
}
@Test
public void fileContainingSymbol() throws Exception {
ServerReflectionRequest request =
ServerReflectionRequest.newBuilder()
.setHost(TEST_HOST)
.setFileContainingSymbol("grpc.reflection.testing.ReflectableService.Method")
.build();
List<ByteString> goldenResponse =
Arrays.asList(
ReflectionTestProto.getDescriptor().toProto().toByteString(),
ReflectionTestDepthTwoProto.getDescriptor().toProto().toByteString(),
ReflectionTestDepthTwoAlternateProto.getDescriptor().toProto().toByteString(),
ReflectionTestDepthThreeProto.getDescriptor().toProto().toByteString());
StreamRecorder<ServerReflectionResponse> responseObserver = StreamRecorder.create();
StreamObserver<ServerReflectionRequest> requestObserver =
stub.serverReflectionInfo(responseObserver);
requestObserver.onNext(request);
requestObserver.onCompleted();
List<ByteString> response =
responseObserver
.firstValue()
.get()
.getFileDescriptorResponse()
.getFileDescriptorProtoList();
assertEquals(goldenResponse.size(), response.size());
assertEquals(new HashSet<>(goldenResponse), new HashSet<>(response));
}
@Test
public void fileContainingNestedSymbol() throws Exception {
ServerReflectionRequest request =
ServerReflectionRequest.newBuilder()
.setHost(TEST_HOST)
.setFileContainingSymbol("grpc.reflection.testing.NestedTypeOuter.Middle.Inner")
.build();
ServerReflectionResponse goldenResponse =
ServerReflectionResponse.newBuilder()
.setValidHost(TEST_HOST)
.setOriginalRequest(request)
.setFileDescriptorResponse(
FileDescriptorResponse.newBuilder()
.addFileDescriptorProto(
ReflectionTestDepthThreeProto.getDescriptor().toProto().toByteString())
.build())
.build();
StreamRecorder<ServerReflectionResponse> responseObserver = StreamRecorder.create();
StreamObserver<ServerReflectionRequest> requestObserver =
stub.serverReflectionInfo(responseObserver);
requestObserver.onNext(request);
requestObserver.onCompleted();
assertEquals(goldenResponse, responseObserver.firstValue().get());
}
  @Test
  public void fileContainingSymbolForMutableServices() throws Exception {
    // Symbol lookup against a dynamically registered service. The assertions
    // below imply each reflection stream answers from the service set visible
    // when the stream was opened, so add/remove calls are interleaved with
    // stream creation on purpose — do not reorder these statements.
    ServerReflectionRequest request =
        ServerReflectionRequest.newBuilder()
            .setHost(TEST_HOST)
            .setFileContainingSymbol("grpc.reflection.testing.DynamicRequest")
            .build();
    ServerReflectionResponse goldenResponse =
        ServerReflectionResponse.newBuilder()
            .setValidHost(TEST_HOST)
            .setOriginalRequest(request)
            .setFileDescriptorResponse(
                FileDescriptorResponse.newBuilder()
                    .addFileDescriptorProto(
                        DynamicReflectionTestDepthTwoProto.getDescriptor().toProto().toByteString())
                    .build())
            .build();
    // Stream 1: opened BEFORE the dynamic service is registered.
    StreamRecorder<ServerReflectionResponse> responseObserver = StreamRecorder.create();
    StreamObserver<ServerReflectionRequest> requestObserver =
        stub.serverReflectionInfo(responseObserver);
    handlerRegistry.addService(dynamicService);
    requestObserver.onNext(request);
    requestObserver.onCompleted();
    // Stream 2: opened WHILE the dynamic service is registered.
    StreamRecorder<ServerReflectionResponse> responseObserver2 = StreamRecorder.create();
    StreamObserver<ServerReflectionRequest> requestObserver2 =
        stub.serverReflectionInfo(responseObserver2);
    handlerRegistry.removeService(dynamicService);
    requestObserver2.onNext(request);
    requestObserver2.onCompleted();
    // Stream 3: opened AFTER the dynamic service was removed.
    StreamRecorder<ServerReflectionResponse> responseObserver3 = StreamRecorder.create();
    StreamObserver<ServerReflectionRequest> requestObserver3 =
        stub.serverReflectionInfo(responseObserver3);
    requestObserver3.onNext(request);
    requestObserver3.onCompleted();
    // Only stream 2 resolves the dynamic symbol; streams 1 and 3 must error.
    assertEquals(
        ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE,
        responseObserver.firstValue().get().getMessageResponseCase());
    assertEquals(goldenResponse, responseObserver2.firstValue().get());
    assertEquals(
        ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE,
        responseObserver3.firstValue().get().getMessageResponseCase());
  }
@Test
public void fileContainingExtension() throws Exception {
ServerReflectionRequest request =
ServerReflectionRequest.newBuilder()
.setHost(TEST_HOST)
.setFileContainingExtension(
ExtensionRequest.newBuilder()
.setContainingType("grpc.reflection.testing.ThirdLevelType")
.setExtensionNumber(100)
.build())
.build();
List<ByteString> goldenResponse =
Arrays.asList(
ReflectionTestProto.getDescriptor().toProto().toByteString(),
ReflectionTestDepthTwoProto.getDescriptor().toProto().toByteString(),
ReflectionTestDepthTwoAlternateProto.getDescriptor().toProto().toByteString(),
ReflectionTestDepthThreeProto.getDescriptor().toProto().toByteString());
StreamRecorder<ServerReflectionResponse> responseObserver = StreamRecorder.create();
StreamObserver<ServerReflectionRequest> requestObserver =
stub.serverReflectionInfo(responseObserver);
requestObserver.onNext(request);
requestObserver.onCompleted();
List<ByteString> response =
responseObserver
.firstValue()
.get()
.getFileDescriptorResponse()
.getFileDescriptorProtoList();
assertEquals(goldenResponse.size(), response.size());
assertEquals(new HashSet<>(goldenResponse), new HashSet<>(response));
}
@Test
public void fileContainingNestedExtension() throws Exception {
ServerReflectionRequest request =
ServerReflectionRequest.newBuilder()
.setHost(TEST_HOST)
.setFileContainingExtension(
ExtensionRequest.newBuilder()
.setContainingType("grpc.reflection.testing.ThirdLevelType")
.setExtensionNumber(101)
.build())
.build();
ServerReflectionResponse goldenResponse =
ServerReflectionResponse.newBuilder()
.setValidHost(TEST_HOST)
.setOriginalRequest(request)
.setFileDescriptorResponse(
FileDescriptorResponse.newBuilder()
.addFileDescriptorProto(
ReflectionTestDepthTwoProto.getDescriptor().toProto().toByteString())
.addFileDescriptorProto(
ReflectionTestDepthThreeProto.getDescriptor().toProto().toByteString())
.build())
.build();
StreamRecorder<ServerReflectionResponse> responseObserver = StreamRecorder.create();
StreamObserver<ServerReflectionRequest> requestObserver =
stub.serverReflectionInfo(responseObserver);
requestObserver.onNext(request);
requestObserver.onCompleted();
assertEquals(goldenResponse, responseObserver.firstValue().get());
}
  @Test
  public void fileContainingExtensionForMutableServices() throws Exception {
    // Extension lookup against a dynamically registered service. The
    // assertions below imply each reflection stream answers from the service
    // set visible when the stream was opened; the add/remove interleaving with
    // stream creation is deliberate — do not reorder these statements.
    ServerReflectionRequest request =
        ServerReflectionRequest.newBuilder()
            .setHost(TEST_HOST)
            .setFileContainingExtension(
                ExtensionRequest.newBuilder()
                    .setContainingType("grpc.reflection.testing.TypeWithExtensions")
                    .setExtensionNumber(200)
                    .build())
            .build();
    ServerReflectionResponse goldenResponse =
        ServerReflectionResponse.newBuilder()
            .setValidHost(TEST_HOST)
            .setOriginalRequest(request)
            .setFileDescriptorResponse(
                FileDescriptorResponse.newBuilder()
                    .addFileDescriptorProto(
                        DynamicReflectionTestDepthTwoProto.getDescriptor().toProto().toByteString())
                    .build())
            .build();
    // Stream 1: opened BEFORE the dynamic service is registered.
    StreamRecorder<ServerReflectionResponse> responseObserver = StreamRecorder.create();
    StreamObserver<ServerReflectionRequest> requestObserver =
        stub.serverReflectionInfo(responseObserver);
    handlerRegistry.addService(dynamicService);
    requestObserver.onNext(request);
    requestObserver.onCompleted();
    // Stream 2: opened WHILE the dynamic service is registered.
    StreamRecorder<ServerReflectionResponse> responseObserver2 = StreamRecorder.create();
    StreamObserver<ServerReflectionRequest> requestObserver2 =
        stub.serverReflectionInfo(responseObserver2);
    handlerRegistry.removeService(dynamicService);
    requestObserver2.onNext(request);
    requestObserver2.onCompleted();
    // Stream 3: opened AFTER the dynamic service was removed.
    StreamRecorder<ServerReflectionResponse> responseObserver3 = StreamRecorder.create();
    StreamObserver<ServerReflectionRequest> requestObserver3 =
        stub.serverReflectionInfo(responseObserver3);
    requestObserver3.onNext(request);
    requestObserver3.onCompleted();
    // Only stream 2 resolves the dynamic extension; streams 1 and 3 must error.
    assertEquals(
        ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE,
        responseObserver.firstValue().get().getMessageResponseCase());
    assertEquals(goldenResponse, responseObserver2.firstValue().get());
    assertEquals(
        ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE,
        responseObserver3.firstValue().get().getMessageResponseCase());
  }
@Test
public void allExtensionNumbersOfType() throws Exception {
ServerReflectionRequest request =
ServerReflectionRequest.newBuilder()
.setHost(TEST_HOST)
.setAllExtensionNumbersOfType("grpc.reflection.testing.ThirdLevelType")
.build();
Set<Integer> goldenResponse = new HashSet<>(Arrays.asList(100, 101));
StreamRecorder<ServerReflectionResponse> responseObserver = StreamRecorder.create();
StreamObserver<ServerReflectionRequest> requestObserver =
stub.serverReflectionInfo(responseObserver);
requestObserver.onNext(request);
requestObserver.onCompleted();
Set<Integer> extensionNumberResponseSet =
new HashSet<>(
responseObserver
.firstValue()
.get()
.getAllExtensionNumbersResponse()
.getExtensionNumberList());
assertEquals(goldenResponse, extensionNumberResponseSet);
}
  @Test
  public void allExtensionNumbersOfTypeForMutableServices() throws Exception {
    // Extension-number enumeration against a dynamically registered service.
    // The assertions below imply each reflection stream answers from the
    // service set visible when the stream was opened; the add/remove
    // interleaving with stream creation is deliberate — do not reorder.
    String type = "grpc.reflection.testing.TypeWithExtensions";
    ServerReflectionRequest request =
        ServerReflectionRequest.newBuilder()
            .setHost(TEST_HOST)
            .setAllExtensionNumbersOfType(type)
            .build();
    ServerReflectionResponse goldenResponse =
        ServerReflectionResponse.newBuilder()
            .setValidHost(TEST_HOST)
            .setOriginalRequest(request)
            .setAllExtensionNumbersResponse(
                ExtensionNumberResponse.newBuilder()
                    .setBaseTypeName(type)
                    .addExtensionNumber(200)
                    .build())
            .build();
    // Stream 1: opened BEFORE the dynamic service is registered.
    StreamRecorder<ServerReflectionResponse> responseObserver = StreamRecorder.create();
    StreamObserver<ServerReflectionRequest> requestObserver =
        stub.serverReflectionInfo(responseObserver);
    handlerRegistry.addService(dynamicService);
    requestObserver.onNext(request);
    requestObserver.onCompleted();
    // Stream 2: opened WHILE the dynamic service is registered.
    StreamRecorder<ServerReflectionResponse> responseObserver2 = StreamRecorder.create();
    StreamObserver<ServerReflectionRequest> requestObserver2 =
        stub.serverReflectionInfo(responseObserver2);
    handlerRegistry.removeService(dynamicService);
    requestObserver2.onNext(request);
    requestObserver2.onCompleted();
    // Stream 3: opened AFTER the dynamic service was removed.
    StreamRecorder<ServerReflectionResponse> responseObserver3 = StreamRecorder.create();
    StreamObserver<ServerReflectionRequest> requestObserver3 =
        stub.serverReflectionInfo(responseObserver3);
    requestObserver3.onNext(request);
    requestObserver3.onCompleted();
    // Only stream 2 sees the dynamic type; streams 1 and 3 must error.
    assertEquals(
        ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE,
        responseObserver.firstValue().get().getMessageResponseCase());
    assertEquals(goldenResponse, responseObserver2.firstValue().get());
    assertEquals(
        ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE,
        responseObserver3.firstValue().get().getMessageResponseCase());
  }
@Test
public void sharedServiceBetweenServers()
throws IOException, ExecutionException, InterruptedException {
Server anotherServer = InProcessServerBuilder.forName("proto-reflection-test-2")
.directExecutor()
.addService(reflectionService)
.addService(new AnotherReflectableServiceGrpc.AnotherReflectableServiceImplBase() {})
.build()
.start();
grpcCleanupRule.register(anotherServer);
ManagedChannel anotherChannel = grpcCleanupRule.register(
InProcessChannelBuilder.forName("proto-reflection-test-2").directExecutor().build());
ServerReflectionGrpc.ServerReflectionStub stub2 = ServerReflectionGrpc.newStub(anotherChannel);
ServerReflectionRequest request =
ServerReflectionRequest.newBuilder().setHost(TEST_HOST).setListServices("services").build();
StreamRecorder<ServerReflectionResponse> responseObserver = StreamRecorder.create();
StreamObserver<ServerReflectionRequest> requestObserver =
stub2.serverReflectionInfo(responseObserver);
requestObserver.onNext(request);
requestObserver.onCompleted();
List<ServiceResponse> response =
responseObserver.firstValue().get().getListServicesResponse().getServiceList();
assertEquals(new HashSet<>(
Arrays.asList(
ServiceResponse.newBuilder()
.setName("grpc.reflection.v1.ServerReflection")
.build(),
ServiceResponse.newBuilder()
.setName("grpc.reflection.testing.AnotherReflectableService")
.build())),
new HashSet<>(response));
}
  @Test
  public void flowControl() throws Exception {
    // Exercises manual flow control: the client must not receive a response
    // until it explicitly asks for one via request(1). The exact ordering of
    // onNext / assert / request calls is the behavior under test.
    FlowControlClientResponseObserver clientResponseObserver =
        new FlowControlClientResponseObserver();
    ClientCallStreamObserver<ServerReflectionRequest> requestObserver =
        (ClientCallStreamObserver<ServerReflectionRequest>)
            stub.serverReflectionInfo(clientResponseObserver);
    // Verify we don't receive a response until we request it.
    requestObserver.onNext(flowControlRequest);
    assertEquals(0, clientResponseObserver.getResponses().size());
    requestObserver.request(1);
    assertEquals(1, clientResponseObserver.getResponses().size());
    assertEquals(flowControlGoldenResponse, clientResponseObserver.getResponses().get(0));
    // Verify we don't receive an additional response until we request it.
    requestObserver.onNext(flowControlRequest);
    assertEquals(1, clientResponseObserver.getResponses().size());
    requestObserver.request(1);
    assertEquals(2, clientResponseObserver.getResponses().size());
    assertEquals(flowControlGoldenResponse, clientResponseObserver.getResponses().get(1));
    requestObserver.onCompleted();
    assertTrue(clientResponseObserver.onCompleteCalled());
  }
  @Test
  public void flowControlOnCompleteWithPendingRequest() throws Exception {
    // Completing the call while a response is still unrequested must not drop
    // it: the pending response and onCompleted are delivered only once the
    // client requests the message.
    FlowControlClientResponseObserver clientResponseObserver =
        new FlowControlClientResponseObserver();
    ClientCallStreamObserver<ServerReflectionRequest> requestObserver =
        (ClientCallStreamObserver<ServerReflectionRequest>)
            stub.serverReflectionInfo(clientResponseObserver);
    requestObserver.onNext(flowControlRequest);
    requestObserver.onCompleted();
    // Nothing delivered yet: the response is held back by flow control.
    assertEquals(0, clientResponseObserver.getResponses().size());
    assertFalse(clientResponseObserver.onCompleteCalled());
    requestObserver.request(1);
    assertTrue(clientResponseObserver.onCompleteCalled());
    assertEquals(1, clientResponseObserver.getResponses().size());
    assertEquals(flowControlGoldenResponse, clientResponseObserver.getResponses().get(0));
  }
  // Request/response pair shared by the flow-control tests: a file-by-filename
  // lookup whose golden answer is the single depth-three test descriptor.
  private final ServerReflectionRequest flowControlRequest =
      ServerReflectionRequest.newBuilder()
          .setHost(TEST_HOST)
          .setFileByFilename("io/grpc/reflection/testing/reflection_test_depth_three.proto")
          .build();
  private final ServerReflectionResponse flowControlGoldenResponse =
      ServerReflectionResponse.newBuilder()
          .setValidHost(TEST_HOST)
          .setOriginalRequest(flowControlRequest)
          .setFileDescriptorResponse(
              FileDescriptorResponse.newBuilder()
                  .addFileDescriptorProto(
                      ReflectionTestDepthThreeProto.getDescriptor().toProto().toByteString())
                  .build())
          .build();
private static | ProtoReflectionServiceV1Test |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java | {
"start": 2050,
"end": 3610
} | class ____ extends UnreliableException {
private static final long serialVersionUID = 1L;
// no body
}
void alwaysSucceeds() throws UnreliableException;
void alwaysFailsWithFatalException() throws FatalException;
void alwaysFailsWithRemoteFatalException() throws RemoteException;
void failsOnceWithIOException() throws IOException;
void failsOnceWithRemoteException() throws RemoteException;
void failsOnceThenSucceeds() throws UnreliableException;
boolean failsOnceThenSucceedsWithReturnValue() throws UnreliableException;
void failsTenTimesThenSucceeds() throws UnreliableException;
void failsWithSASLExceptionTenTimes() throws SaslException;
@Idempotent
void failsWithAccessControlExceptionEightTimes()
throws AccessControlException;
@Idempotent
void failsWithWrappedAccessControlException()
throws IOException;
public String succeedsOnceThenFailsReturningString()
throws UnreliableException, StandbyException, IOException;
@Idempotent
public String succeedsOnceThenFailsReturningStringIdempotent()
throws UnreliableException, StandbyException, IOException;
public String succeedsTenTimesThenFailsReturningString()
throws UnreliableException, StandbyException, IOException;
@Idempotent
public String failsIfIdentifierDoesntMatch(String identifier)
throws UnreliableException, StandbyException, IOException;
void nonIdempotentVoidFailsIfIdentifierDoesntMatch(String identifier)
throws UnreliableException, StandbyException, IOException;
}
| FatalException |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/PublicSuperclass.java | {
"start": 738,
"end": 1125
} | class ____ {
public int process(int num) {
return num + 1;
}
public int getNumber() {
return 1;
}
public String getMessage() {
return "goodbye";
}
public String greet(String name) {
return "Super, " + name;
}
public String getIndex(int index) {
return "value-" + index;
}
public String getIndex2(int index) {
return "value-" + (2 * index);
}
}
| PublicSuperclass |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2674/ErroneousSourceTargetMapping.java | {
"start": 374,
"end": 781
} | interface ____ {
ErroneousSourceTargetMapping INSTANCE = Mappers.getMapper( ErroneousSourceTargetMapping.class );
@BeforeMapping
void beforeMappingMethod(Target target, @MappingTarget Source source);
@AfterMapping
void afterMappingMethod(Source source, @MappingTarget Target target);
Target toTarget(Source source);
Source toSource(Target target);
}
| ErroneousSourceTargetMapping |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/createTable/MySqlCreateTableTest91.java | {
"start": 245,
"end": 1629
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "CREATE TABLE tbl_name(id int, sid int, name varchar(8)) " +
"PARTITION BY LINEAR KEY ALGORITHM=2 (id, sid) PARTITIONS 4 (PARTITION p0, PARTITION p1, PARTITION p2, PARTITION p3)";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
assertEquals(1, statementList.size());
}
public void test_1() throws Exception {
String sql = "CREATE TABLE tbl_name(id int, sid int, name varchar(8)) " +
"PARTITION BY LINEAR KEY ALGORITHM=2 (id, sid) PARTITIONS 4 " +
"SUBPARTITION BY LINEAR KEY ALGORITHM=2 (id, sid) SUBPARTITIONS 2 " +
"(PARTITION p0 (SUBPARTITION s0, SUBPARTITION s1), " +
"PARTITION p1 (SUBPARTITION s0, SUBPARTITION s1), " +
"PARTITION p2 (SUBPARTITION s0, SUBPARTITION s1), " +
"PARTITION p3 (SUBPARTITION s0, SUBPARTITION s1))";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
assertEquals(1, statementList.size());
}
}
| MySqlCreateTableTest91 |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java | {
"start": 935,
"end": 1345
} | class ____ extends AbstractContractAppendTest {
@BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
@AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new HDFSContract(conf);
}
}
| TestHDFSContractAppend |
java | spring-projects__spring-boot | module/spring-boot-jackson/src/test/java/org/springframework/boot/jackson/autoconfigure/JacksonAutoConfigurationTests.java | {
"start": 34754,
"end": 34962
} | class ____ {
@Bean
@Primary
ObjectMapper objectMapper() {
return mock(ObjectMapper.class);
}
}
@Configuration(proxyBeanMethods = false)
@Import(BazSerializer.class)
static | MockObjectMapperConfig |
java | apache__kafka | connect/mirror/src/main/java/org/apache/kafka/connect/mirror/Scheduler.java | {
"start": 3521,
"end": 4682
} | interface ____ {
void run() throws InterruptedException, ExecutionException;
}
private void run(Task task, String description, boolean checkTimeout) {
try {
long start = System.currentTimeMillis();
task.run();
long elapsed = System.currentTimeMillis() - start;
LOG.info("{} took {} ms", description, elapsed);
if (checkTimeout && elapsed > timeout.toMillis()) {
LOG.warn("{} took too long ({} ms) running task: {}", name, elapsed, description);
}
} catch (InterruptedException e) {
LOG.warn("{} was interrupted running task: {}", name, description);
} catch (Throwable e) {
LOG.error("{} caught exception in scheduled task: {}", name, description, e);
}
}
private void executeThread(Task task, String description, boolean checkTimeout) {
Thread.currentThread().setName(name + "-" + description);
if (closed) {
LOG.info("{} skipping task due to shutdown: {}", name, description);
return;
}
run(task, description, checkTimeout);
}
}
| Task |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/test/SearchHitBuilder.java | {
"start": 557,
"end": 610
} | class ____ build {@link SearchHit} in tests
*/
public | to |
java | apache__camel | components/camel-netty/src/main/java/org/apache/camel/component/netty/NettyWorkerPoolBuilder.java | {
"start": 1399,
"end": 3498
} | class ____ {
private String name = "NettyWorker";
private String pattern;
private int workerCount;
private boolean nativeTransport;
private volatile EventLoopGroup workerPool;
public void setName(String name) {
this.name = name;
}
public void setPattern(String pattern) {
this.pattern = pattern;
}
public void setWorkerCount(int workerCount) {
this.workerCount = workerCount;
}
public void setNativeTransport(boolean nativeTransport) {
this.nativeTransport = nativeTransport;
}
public NettyWorkerPoolBuilder withName(String name) {
setName(name);
return this;
}
public NettyWorkerPoolBuilder withPattern(String pattern) {
setPattern(pattern);
return this;
}
public NettyWorkerPoolBuilder withWorkerCount(int workerCount) {
setWorkerCount(workerCount);
return this;
}
public NettyWorkerPoolBuilder withNativeTransport(boolean nativeTransport) {
setNativeTransport(nativeTransport);
return this;
}
/**
* Creates a new worker pool.
*/
public EventLoopGroup build() {
if (nativeTransport) {
if (KQueue.isAvailable()) {
workerPool = new KQueueEventLoopGroup(workerCount, new CamelThreadFactory(pattern, name, false));
} else if (Epoll.isAvailable()) {
workerPool = new EpollEventLoopGroup(workerCount, new CamelThreadFactory(pattern, name, false));
} else {
throw new IllegalStateException(
"Unable to use native transport - both Epoll and KQueue are not available");
}
} else {
workerPool = new NioEventLoopGroup(workerCount, new CamelThreadFactory(pattern, name, false));
}
return workerPool;
}
/**
* Shutdown the created worker pool
*/
public void destroy() {
if (workerPool != null) {
workerPool.shutdownGracefully();
workerPool = null;
}
}
}
| NettyWorkerPoolBuilder |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java | {
"start": 39449,
"end": 39824
} | class ____ apply a project-id to another object.
*/
protected record ProjectScoped<T>(ProjectId projectId, T value) {
protected ProjectScoped {
Objects.requireNonNull(projectId);
}
@Override
public String toString() {
return getClass().getSimpleName() + '<' + projectId + ">{" + value + "}";
}
}
}
| to |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_3905/OverrideDto.java | {
"start": 232,
"end": 415
} | class ____ {
private final String name;
public OverrideDto(String name) {
this.name = name;
}
public String getName() {
return name;
}
}
| OverrideDto |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/NonOverridingEqualsTest.java | {
"start": 6724,
"end": 7057
} | class ____ {
public boolean equals(Test other, String s) {
return false;
}
}
""")
.doTest();
}
@Test
public void dontFlagIfWrongReturnType() {
compilationHelper
.addSourceLines(
"Test.java",
"""
public | Test |
java | apache__camel | components/camel-jpa/src/test/java/org/apache/camel/processor/jpa/JpaProducerPassingEntityManagerTest.java | {
"start": 1381,
"end": 3666
} | class ____ extends AbstractJpaTest {
protected static final String SELECT_ALL_STRING = "select x from " + SendEmail.class.getName() + " x";
@Test
public void testRouteJpa() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
context.getRouteController().startRoute("foo");
JpaEndpoint jpa = context.getEndpoint("jpa://" + SendEmail.class.getName(), JpaEndpoint.class);
EntityManagerFactory emf = jpa.getEntityManagerFactory();
// The entity instance is different if it is retrieved from different EntityManager instance
EntityManager entityManager = emf.createEntityManager();
template.sendBody("direct:start", new SendEmail("foo@beer.org"));
Exchange exchange = mock.getReceivedExchanges().get(0);
SendEmail persistedEntity = exchange.getIn().getBody(SendEmail.class);
SendEmail emfindEntity = entityManager.find(SendEmail.class, persistedEntity.getId());
assertNotSame(emfindEntity, persistedEntity);
entityManager.close();
mock.reset();
// The same EntityManager returns same entity instance from its 1st level cache
entityManager = emf.createEntityManager();
template.sendBodyAndHeader("direct:start", new SendEmail("bar@beer.org"), JpaConstants.ENTITY_MANAGER, entityManager);
exchange = mock.getReceivedExchanges().get(0);
persistedEntity = exchange.getIn().getBody(SendEmail.class);
emfindEntity = entityManager.find(SendEmail.class, persistedEntity.getId());
assertSame(emfindEntity, persistedEntity);
entityManager.close();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:start")
.id("foo")
.to("jpa://" + SendEmail.class.getName() + "?usePassedInEntityManager=true")
.to("mock:result");
}
};
}
@Override
protected String routeXml() {
return "org/apache/camel/processor/jpa/springJpaRouteTest.xml";
}
@Override
protected String selectAllString() {
return SELECT_ALL_STRING;
}
}
| JpaProducerPassingEntityManagerTest |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authentication/jaas/JaasAuthenticationProvider.java | {
"start": 5977,
"end": 10319
} | class ____ extends AbstractJaasAuthenticationProvider {
// exists for passivity
protected static final Log log = LogFactory.getLog(JaasAuthenticationProvider.class);
@SuppressWarnings("NullAway.Init")
private Resource loginConfig;
private boolean refreshConfigurationOnStartup = true;
@Override
public void afterPropertiesSet() throws Exception {
// the superclass is not called because it does additional checks that are
// non-passive
Assert.hasLength(getLoginContextName(), () -> "loginContextName must be set on " + getClass());
Assert.notNull(this.loginConfig, () -> "loginConfig must be set on " + getClass());
configureJaas(this.loginConfig);
Assert.notNull(Configuration.getConfiguration(),
"As per https://java.sun.com/j2se/1.5.0/docs/api/javax/security/auth/login/Configuration.html "
+ "\"If a Configuration object was set via the Configuration.setConfiguration method, then that object is "
+ "returned. Otherwise, a default Configuration object is returned\". Your JRE returned null to "
+ "Configuration.getConfiguration().");
}
@Override
protected LoginContext createLoginContext(CallbackHandler handler) throws LoginException {
return new LoginContext(getLoginContextName(), handler);
}
/**
* Hook method for configuring Jaas.
* @param loginConfig URL to Jaas login configuration
* @throws IOException if there is a problem reading the config resource.
*/
protected void configureJaas(Resource loginConfig) throws IOException {
configureJaasUsingLoop();
if (this.refreshConfigurationOnStartup) {
// Overcome issue in SEC-760
Configuration.getConfiguration().refresh();
}
}
/**
* Loops through the login.config.url.1,login.config.url.2 properties looking for the
* login configuration. If it is not set, it will be set to the last available
* login.config.url.X property.
*
*/
private void configureJaasUsingLoop() throws IOException {
String loginConfigUrl = convertLoginConfigToUrl();
boolean alreadySet = false;
int n = 1;
final String prefix = "login.config.url.";
String existing;
while ((existing = Security.getProperty(prefix + n)) != null) {
alreadySet = existing.equals(loginConfigUrl);
if (alreadySet) {
break;
}
n++;
}
if (!alreadySet) {
String key = prefix + n;
log.debug(LogMessage.format("Setting security property [%s] to: %s", key, loginConfigUrl));
Security.setProperty(key, loginConfigUrl);
}
}
private String convertLoginConfigToUrl() throws IOException {
String loginConfigPath;
try {
loginConfigPath = this.loginConfig.getFile().getAbsolutePath().replace(File.separatorChar, '/');
if (!loginConfigPath.startsWith("/")) {
loginConfigPath = "/" + loginConfigPath;
}
return new URL("file", "", loginConfigPath).toString();
}
catch (IOException ex) {
// SEC-1700: May be inside a jar
return this.loginConfig.getURL().toString();
}
}
/**
* Publishes the {@link JaasAuthenticationFailedEvent}. Can be overridden by
* subclasses for different functionality
* @param token The authentication token being processed
* @param ase The exception that caused the authentication failure
*/
@Override
protected void publishFailureEvent(UsernamePasswordAuthenticationToken token, AuthenticationException ase) {
// exists for passivity (the superclass does a null check before publishing)
getApplicationEventPublisher().publishEvent(new JaasAuthenticationFailedEvent(token, ase));
}
public Resource getLoginConfig() {
return this.loginConfig;
}
/**
* Set the JAAS login configuration file.
* @param loginConfig
*
* @see <a href=
* "https://java.sun.com/j2se/1.5.0/docs/guide/security/jaas/JAASRefGuide.html">JAAS
* Reference</a>
*/
public void setLoginConfig(Resource loginConfig) {
this.loginConfig = loginConfig;
}
/**
* If set, a call to {@code Configuration#refresh()} will be made by
* {@code #configureJaas(Resource) } method. Defaults to {@code true}.
* @param refresh set to {@code false} to disable reloading of the configuration. May
* be useful in some environments.
* @see <a href="https://jira.springsource.org/browse/SEC-1320">SEC-1320</a>
*/
public void setRefreshConfigurationOnStartup(boolean refresh) {
this.refreshConfigurationOnStartup = refresh;
}
}
| JaasAuthenticationProvider |
java | alibaba__nacos | core/src/main/java/io/grpc/netty/shaded/io/grpc/netty/NettyChannelHelper.java | {
"start": 961,
"end": 1598
} | class ____ {
private static final ReferenceFieldUpdater<NettyServerStream, WriteQueue> WRITE_QUEUE_GETTER = Updaters.newReferenceFieldUpdater(
NettyServerStream.class, "writeQueue");
private static final ReferenceFieldUpdater<WriteQueue, Channel> CHANNEL_GETTER = Updaters.newReferenceFieldUpdater(
WriteQueue.class, "channel");
public static Channel getChannel(final ServerStream stream) {
if (stream instanceof NettyServerStream) {
return CHANNEL_GETTER.get(WRITE_QUEUE_GETTER.get((NettyServerStream) stream));
}
return null;
}
}
| NettyChannelHelper |
java | apache__maven | impl/maven-impl/src/main/java/org/apache/maven/impl/resolver/MavenMetadata.java | {
"start": 1537,
"end": 4414
} | class ____ extends AbstractMetadata implements MergeableMetadata {
static final String MAVEN_METADATA_XML = "maven-metadata.xml";
static DateTimeFormatter fmt;
static {
fmt = DateTimeFormatter.ofPattern("yyyyMMddHHmmss").withZone(ZoneOffset.UTC);
}
protected Metadata metadata;
private final Path path;
protected final Instant timestamp;
private boolean merged;
@Deprecated
protected MavenMetadata(Metadata metadata, File file, Instant timestamp) {
this(metadata, file != null ? file.toPath() : null, timestamp);
}
protected MavenMetadata(Metadata metadata, Path path, Instant timestamp) {
this.metadata = metadata;
this.path = path;
this.timestamp = timestamp;
}
@Override
public String getType() {
return MAVEN_METADATA_XML;
}
@Deprecated
@Override
public File getFile() {
return path != null ? path.toFile() : null;
}
@Override
public Path getPath() {
return path;
}
@Deprecated
@Override
public void merge(File existing, File result) throws RepositoryException {
merge(existing != null ? existing.toPath() : null, result != null ? result.toPath() : null);
}
@Override
public void merge(Path existing, Path result) throws RepositoryException {
Metadata recessive = read(existing);
merge(recessive);
write(result, metadata);
merged = true;
}
@Override
public boolean isMerged() {
return merged;
}
protected abstract void merge(Metadata recessive);
static Metadata read(Path metadataPath) throws RepositoryException {
if (!Files.exists(metadataPath)) {
return Metadata.newInstance();
}
try (InputStream input = Files.newInputStream(metadataPath)) {
return new MetadataStaxReader().read(input, false);
} catch (IOException | XMLStreamException e) {
throw new RepositoryException("Could not parse metadata " + metadataPath + ": " + e.getMessage(), e);
}
}
private void write(Path metadataPath, Metadata metadata) throws RepositoryException {
try {
Files.createDirectories(metadataPath.getParent());
try (OutputStream output = Files.newOutputStream(metadataPath)) {
new MetadataStaxWriter().write(output, metadata);
}
} catch (IOException | XMLStreamException e) {
throw new RepositoryException("Could not write metadata " + metadataPath + ": " + e.getMessage(), e);
}
}
@Override
public Map<String, String> getProperties() {
return Collections.emptyMap();
}
@Override
public org.eclipse.aether.metadata.Metadata setProperties(Map<String, String> properties) {
return this;
}
}
| MavenMetadata |
java | spring-projects__spring-framework | spring-r2dbc/src/test/java/org/springframework/r2dbc/connection/init/AbstractDatabasePopulatorTests.java | {
"start": 1119,
"end": 3788
} | class ____ {
ClassRelativeResourceLoader resourceLoader = new ClassRelativeResourceLoader(getClass());
ResourceDatabasePopulator databasePopulator = new ResourceDatabasePopulator();
@Test
void scriptWithSingleLineCommentsAndFailedDrop() {
databasePopulator.addScript(resource("db-schema-failed-drop-comments.sql"));
databasePopulator.addScript(resource("db-test-data.sql"));
databasePopulator.setIgnoreFailedDrops(true);
runPopulator();
assertUsersDatabaseCreated("Heisenberg");
}
@Test
void scriptWithStandardEscapedLiteral() {
databasePopulator.addScript(defaultSchema());
databasePopulator.addScript(resource("db-test-data-escaped-literal.sql"));
runPopulator();
assertUsersDatabaseCreated("'Heisenberg'");
}
@Test
void scriptWithMySqlEscapedLiteral() {
databasePopulator.addScript(defaultSchema());
databasePopulator.addScript(resource("db-test-data-mysql-escaped-literal.sql"));
runPopulator();
assertUsersDatabaseCreated("\\$Heisenberg\\$");
}
@Test
void scriptWithMultipleStatements() {
databasePopulator.addScript(defaultSchema());
databasePopulator.addScript(resource("db-test-data-multiple.sql"));
runPopulator();
assertUsersDatabaseCreated("Heisenberg", "Jesse");
}
@Test
void scriptWithMultipleStatementsAndLongSeparator() {
databasePopulator.addScript(defaultSchema());
databasePopulator.addScript(resource("db-test-data-endings.sql"));
databasePopulator.setSeparator("@@");
runPopulator();
assertUsersDatabaseCreated("Heisenberg", "Jesse");
}
private void runPopulator() {
databasePopulator.populate(getConnectionFactory()) //
.as(StepVerifier::create) //
.verifyComplete();
}
void assertUsersDatabaseCreated(String... lastNames) {
assertUsersDatabaseCreated(getConnectionFactory(), lastNames);
}
void assertUsersDatabaseCreated(ConnectionFactory connectionFactory,String... lastNames) {
DatabaseClient client = DatabaseClient.create(connectionFactory);
for (String lastName : lastNames) {
client.sql("select count(0) from users where last_name = :name") //
.bind("name", lastName) //
.map((row, metadata) -> row.get(0)) //
.first() //
.map(number -> ((Number) number).intValue()) //
.as(StepVerifier::create) //
.expectNext(1).as("Did not find user with last name [" + lastName + "].") //
.verifyComplete();
}
}
abstract ConnectionFactory getConnectionFactory();
Resource resource(String path) {
return resourceLoader.getResource(path);
}
Resource defaultSchema() {
return resource("db-schema.sql");
}
Resource usersSchema() {
return resource("users-schema.sql");
}
}
| AbstractDatabasePopulatorTests |
java | spring-projects__spring-boot | module/spring-boot-data-mongodb-test/src/dockerTest/java/org/springframework/boot/data/mongodb/test/autoconfigure/ExampleService.java | {
"start": 927,
"end": 1215
} | class ____ {
private final MongoTemplate mongoTemplate;
public ExampleService(MongoTemplate mongoTemplate) {
this.mongoTemplate = mongoTemplate;
}
public boolean hasCollection(String collectionName) {
return this.mongoTemplate.collectionExists(collectionName);
}
}
| ExampleService |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiUnifiedChatCompletionResponseHandlerTests.java | {
"start": 1256,
"end": 5432
} | class ____ extends ESTestCase {
private final OpenAiUnifiedChatCompletionResponseHandler responseHandler = new OpenAiUnifiedChatCompletionResponseHandler(
"chat completions",
(a, b) -> mock()
);
public void testFailValidationWithAllFields() throws IOException {
var responseJson = """
{
"error": {
"type": "not_found_error",
"message": "a message",
"code": "ahh",
"param": "model"
}
}
""";
var errorJson = invalidResponseJson(responseJson);
assertThat(errorJson, is("""
{"error":{"code":"ahh","message":"Received a server error status code for request from inference entity id [abc] status [500]. \
Error message: [a message]","param":"model","type":"not_found_error"}}"""));
}
public void testFailValidationWithoutOptionalFields() throws IOException {
var responseJson = """
{
"error": {
"type": "not_found_error",
"message": "a message"
}
}
""";
var errorJson = invalidResponseJson(responseJson);
@SuppressWarnings("checkstyle:LineLength")
var expectedError = XContentHelper.stripWhitespace(
"""
{
"error":{
"code":"bad_request",
"message":"Received a server error status code for request from inference entity id [abc] status [500]. Error message: [a message]",
"type":"not_found_error"
}
}"""
);
assertThat(errorJson, is(expectedError));
}
public void testFailValidationWithInvalidJson() throws IOException {
var responseJson = """
what? this isn't a json
""";
var errorJson = invalidResponseJson(responseJson);
assertThat(errorJson, is("""
{"error":{"code":"bad_request","message":"Received a server error status code for request from inference entity id [abc] status\
[500]","type":"UnifiedChatCompletionErrorResponse"}}"""));
}
private String invalidResponseJson(String responseJson) throws IOException {
var exception = invalidResponse(responseJson);
assertThat(exception, isA(RetryException.class));
assertThat(unwrapCause(exception), isA(UnifiedChatCompletionException.class));
return toJson((UnifiedChatCompletionException) unwrapCause(exception));
}
private Exception invalidResponse(String responseJson) {
return expectThrows(
RetryException.class,
() -> responseHandler.validateResponse(
mock(),
mock(),
mockRequest(),
new HttpResult(mock500Response(), responseJson.getBytes(StandardCharsets.UTF_8))
)
);
}
private static Request mockRequest() {
var request = mock(Request.class);
when(request.getInferenceEntityId()).thenReturn("abc");
when(request.isStreaming()).thenReturn(true);
return request;
}
private static HttpResponse mock500Response() {
int statusCode = 500;
var statusLine = mock(StatusLine.class);
when(statusLine.getStatusCode()).thenReturn(statusCode);
var response = mock(HttpResponse.class);
when(response.getStatusLine()).thenReturn(statusLine);
return response;
}
private String toJson(UnifiedChatCompletionException e) throws IOException {
try (var builder = XContentFactory.jsonBuilder()) {
e.toXContentChunked(EMPTY_PARAMS).forEachRemaining(xContent -> {
try {
xContent.toXContent(builder, EMPTY_PARAMS);
} catch (IOException ex) {
throw new RuntimeException(ex);
}
});
return XContentHelper.convertToJson(BytesReference.bytes(builder), false, builder.contentType());
}
}
}
| OpenAiUnifiedChatCompletionResponseHandlerTests |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/NlpTokenizer.java | {
"start": 1614,
"end": 24723
} | class ____ implements Releasable {
public static final int CALC_DEFAULT_SPAN_VALUE = -2;
abstract int clsTokenId();
abstract int sepTokenId();
abstract int maxSequenceLength();
abstract boolean isWithSpecialTokens();
abstract int numExtraTokensForSingleSequence();
abstract int getNumExtraTokensForSeqPair();
int defaultSpanForChunking(int maxWindowSize) {
return (maxWindowSize - numExtraTokensForSingleSequence()) / 2;
}
public abstract TokenizationResult buildTokenizationResult(List<TokenizationResult.Tokens> tokenizations);
/**
* Tokenize the input according to the basic tokenization
* options then perform the configured tokenization with the given vocabulary.
*
* The result is the tokens ids, a map of the
* token position to the position of the token in the source for
* each input string grouped into a {@link Tokenization}.
*
* @param seq Text to tokenize
* @param truncate
* @param span
* @param sequenceId
* @param windowSize
* @return A list of {@link Tokenization}
*/
public final List<TokenizationResult.Tokens> tokenize(
String seq,
Tokenization.Truncate truncate,
int span,
int sequenceId,
Integer windowSize
) {
if (windowSize == null) {
windowSize = maxSequenceLength();
}
var innerResult = innerTokenize(seq);
List<? extends DelimitedToken.Encoded> tokenIds = innerResult.tokens();
List<Integer> tokenPositionMap = innerResult.tokenPositionMap();
int numTokens = isWithSpecialTokens() ? tokenIds.size() + numExtraTokensForSingleSequence() : tokenIds.size();
boolean isTruncated = false;
if (numTokens > windowSize) {
switch (truncate) {
case FIRST, SECOND, BALANCED -> { // only one sequence exists in this case
isTruncated = true;
tokenIds = tokenIds.subList(0, isWithSpecialTokens() ? windowSize - numExtraTokensForSingleSequence() : windowSize);
tokenPositionMap = tokenPositionMap.subList(
0,
isWithSpecialTokens() ? windowSize - numExtraTokensForSingleSequence() : windowSize
);
}
case NONE -> {
if (span == -1) {
throw ExceptionsHelper.badRequestException(
"Input too large. The tokenized input length [{}] exceeds the maximum sequence length [{}]",
numTokens,
windowSize
);
}
}
}
}
if (numTokens <= windowSize || span == -1) {
return List.of(
createTokensBuilder(clsTokenId(), sepTokenId(), isWithSpecialTokens()).addSequence(
tokenIds.stream().map(DelimitedToken.Encoded::getEncoding).collect(Collectors.toList()),
tokenPositionMap
).build(seq, isTruncated, innerResult.tokens, -1, sequenceId)
);
}
if (span == CALC_DEFAULT_SPAN_VALUE) {
span = defaultSpanForChunking(windowSize);
}
List<TokenizationResult.Tokens> toReturn = new ArrayList<>();
int splitEndPos = 0;
int splitStartPos = 0;
int spanPrev = -1;
while (splitEndPos < tokenIds.size()) {
splitEndPos = min(
splitStartPos + (isWithSpecialTokens() ? windowSize - numExtraTokensForSingleSequence() : windowSize),
tokenIds.size()
);
// Make sure we do not end on a word
if (splitEndPos != tokenIds.size()) {
while (splitEndPos > splitStartPos + 1
&& Objects.equals(tokenPositionMap.get(splitEndPos), tokenPositionMap.get(splitEndPos - 1))) {
splitEndPos--;
}
}
toReturn.add(
createTokensBuilder(clsTokenId(), sepTokenId(), isWithSpecialTokens()).addSequence(
tokenIds.subList(splitStartPos, splitEndPos)
.stream()
.map(DelimitedToken.Encoded::getEncoding)
.collect(Collectors.toList()),
tokenPositionMap.subList(splitStartPos, splitEndPos)
).build(seq, false, tokenIds.subList(splitStartPos, splitEndPos), spanPrev, sequenceId)
);
spanPrev = span;
int prevSplitStart = splitStartPos;
splitStartPos = splitEndPos - span;
// try to back up our split so that it starts at the first whole word
if (splitStartPos < tokenIds.size()) {
while (splitStartPos > (prevSplitStart + 1)
&& Objects.equals(tokenPositionMap.get(splitStartPos), tokenPositionMap.get(splitStartPos - 1))) {
splitStartPos--;
spanPrev++;
}
}
}
return toReturn;
}
/**
* Tokenize the sequence pair
* @param seq1 The first sequence in the pair
* @param seq2 The second sequence
* @param truncate truncate settings
* @param sequenceId The unique id for this tokenization request
* @return tokenization result for the sequence pair
*/
public TokenizationResult.Tokens tokenize(String seq1, String seq2, Tokenization.Truncate truncate, int sequenceId) {
return tokenize(seq1, innerTokenize(seq1), seq2, truncate, sequenceId);
}
/**
* The same as {@link NlpTokenizer#tokenize(String, String, Tokenization.Truncate, int)} but allows for tokenizing the first sequence
* only once. Useful for zero shot classification.
* @param seq1 The first sequence
* @param innerResultSeq1 The tokenization of the first sequence
* @param seq2 The second sequence in the pair
* @param truncate truncate settings
* @param sequenceId The unique id for this tokenization request
* @return tokenization result for the sequence pair
*/
public TokenizationResult.Tokens tokenize(
String seq1,
InnerTokenization innerResultSeq1,
String seq2,
Tokenization.Truncate truncate,
int sequenceId
) {
List<? extends DelimitedToken.Encoded> tokenIdsSeq1 = innerResultSeq1.tokens;
List<Integer> tokenPositionMapSeq1 = innerResultSeq1.tokenPositionMap;
var innerResultSeq2 = innerTokenize(seq2);
List<? extends DelimitedToken.Encoded> tokenIdsSeq2 = innerResultSeq2.tokens;
List<Integer> tokenPositionMapSeq2 = innerResultSeq2.tokenPositionMap;
if (isWithSpecialTokens() == false) {
throw new IllegalArgumentException("Unable to do sequence pair tokenization without special tokens");
}
int extraTokens = getNumExtraTokensForSeqPair();
int numTokens = tokenIdsSeq1.size() + tokenIdsSeq2.size() + extraTokens;
boolean isTruncated = false;
if (numTokens > maxSequenceLength()) {
switch (truncate) {
case FIRST -> {
isTruncated = true;
if (tokenIdsSeq2.size() > maxSequenceLength() - extraTokens) {
throw ExceptionsHelper.badRequestException(
"Attempting truncation [{}] but input is too large for the second sequence. "
+ "The tokenized input length [{}] exceeds the maximum sequence length [{}], "
+ "when taking special tokens into account",
truncate.toString(),
tokenIdsSeq2.size(),
maxSequenceLength() - extraTokens
);
}
tokenIdsSeq1 = tokenIdsSeq1.subList(0, maxSequenceLength() - extraTokens - tokenIdsSeq2.size());
tokenPositionMapSeq1 = tokenPositionMapSeq1.subList(0, maxSequenceLength() - extraTokens - tokenIdsSeq2.size());
}
case SECOND -> {
isTruncated = true;
if (tokenIdsSeq1.size() > maxSequenceLength() - extraTokens) {
throw ExceptionsHelper.badRequestException(
"Attempting truncation [{}] but input is too large for the first sequence. "
+ "The tokenized input length [{}] exceeds the maximum sequence length [{}], "
+ "when taking special tokens into account",
truncate.toString(),
tokenIdsSeq1.size(),
maxSequenceLength() - extraTokens
);
}
tokenIdsSeq2 = tokenIdsSeq2.subList(0, maxSequenceLength() - extraTokens - tokenIdsSeq1.size());
tokenPositionMapSeq2 = tokenPositionMapSeq2.subList(0, maxSequenceLength() - extraTokens - tokenIdsSeq1.size());
}
case BALANCED -> {
isTruncated = true;
int firstSequenceLength = 0;
if (tokenIdsSeq2.size() > (maxSequenceLength() - getNumExtraTokensForSeqPair()) / 2) {
firstSequenceLength = min(tokenIdsSeq1.size(), (maxSequenceLength() - getNumExtraTokensForSeqPair()) / 2);
} else {
firstSequenceLength = min(
tokenIdsSeq1.size(),
maxSequenceLength() - tokenIdsSeq2.size() - getNumExtraTokensForSeqPair()
);
}
int secondSequenceLength = min(
tokenIdsSeq2.size(),
maxSequenceLength() - firstSequenceLength - getNumExtraTokensForSeqPair()
);
tokenIdsSeq1 = tokenIdsSeq1.subList(0, firstSequenceLength);
tokenPositionMapSeq1 = tokenPositionMapSeq1.subList(0, firstSequenceLength);
tokenIdsSeq2 = tokenIdsSeq2.subList(0, secondSequenceLength);
tokenPositionMapSeq2 = tokenPositionMapSeq2.subList(0, secondSequenceLength);
}
case NONE -> throw ExceptionsHelper.badRequestException(
"Input too large. The tokenized input length [{}] exceeds the maximum sequence length [{}]",
numTokens,
maxSequenceLength()
);
}
}
return createTokensBuilder(clsTokenId(), sepTokenId(), isWithSpecialTokens()).addSequencePair(
tokenIdsSeq1.stream().map(DelimitedToken.Encoded::getEncoding).collect(Collectors.toList()),
tokenPositionMapSeq1,
tokenIdsSeq2.stream().map(DelimitedToken.Encoded::getEncoding).collect(Collectors.toList()),
tokenPositionMapSeq2
).build(List.of(seq1, seq2), isTruncated, List.of(innerResultSeq1.tokens, innerResultSeq2.tokens), -1, sequenceId);
}
/**
* Tokenize the two sequences, allowing for spanning of the 2nd sequence
* @param seq1 The first sequence in the pair
* @param seq2 The second sequence
* @param truncate truncate settings
* @param span the spanning settings, how many tokens to overlap.
* We split and span on seq2.
* @param sequenceId Unique sequence id for this tokenization
* @return tokenization result for the sequence pair
*/
public List<TokenizationResult.Tokens> tokenize(String seq1, String seq2, Tokenization.Truncate truncate, int span, int sequenceId) {
if (isWithSpecialTokens() == false) {
throw new IllegalArgumentException("Unable to do sequence pair tokenization without special tokens");
}
var innerResultSeq1 = innerTokenize(seq1);
List<? extends DelimitedToken.Encoded> tokenIdsSeq1 = innerResultSeq1.tokens;
List<Integer> tokenPositionMapSeq1 = innerResultSeq1.tokenPositionMap;
var innerResultSeq2 = innerTokenize(seq2);
List<? extends DelimitedToken.Encoded> tokenIdsSeq2 = innerResultSeq2.tokens;
List<Integer> tokenPositionMapSeq2 = innerResultSeq2.tokenPositionMap;
int extraTokens = getNumExtraTokensForSeqPair();
int numTokens = tokenIdsSeq1.size() + tokenIdsSeq2.size() + extraTokens;
boolean isTruncated = false;
if (numTokens > maxSequenceLength() && span < 0) {
switch (truncate) {
case FIRST -> {
isTruncated = true;
if (tokenIdsSeq2.size() > maxSequenceLength() - extraTokens) {
throw ExceptionsHelper.badRequestException(
"Attempting truncation [{}] but input is too large for the second sequence. "
+ "The tokenized input length [{}] exceeds the maximum sequence length [{}], "
+ "when taking special tokens into account",
truncate.toString(),
tokenIdsSeq2.size(),
maxSequenceLength() - extraTokens
);
}
tokenIdsSeq1 = tokenIdsSeq1.subList(0, maxSequenceLength() - extraTokens - tokenIdsSeq2.size());
tokenPositionMapSeq1 = tokenPositionMapSeq1.subList(0, maxSequenceLength() - extraTokens - tokenIdsSeq2.size());
}
case SECOND -> {
isTruncated = true;
if (tokenIdsSeq1.size() > maxSequenceLength() - extraTokens) {
throw ExceptionsHelper.badRequestException(
"Attempting truncation [{}] but input is too large for the first sequence. "
+ "The tokenized input length [{}] exceeds the maximum sequence length [{}], "
+ "when taking special tokens into account",
truncate.toString(),
tokenIdsSeq1.size(),
maxSequenceLength() - extraTokens
);
}
tokenIdsSeq2 = tokenIdsSeq2.subList(0, maxSequenceLength() - extraTokens - tokenIdsSeq1.size());
tokenPositionMapSeq2 = tokenPositionMapSeq2.subList(0, maxSequenceLength() - extraTokens - tokenIdsSeq1.size());
}
case BALANCED -> {
isTruncated = true;
int firstSequenceLength = 0;
if (tokenIdsSeq2.size() > (maxSequenceLength() - getNumExtraTokensForSeqPair()) / 2) {
firstSequenceLength = min(tokenIdsSeq1.size(), (maxSequenceLength() - getNumExtraTokensForSeqPair()) / 2);
} else {
firstSequenceLength = min(
tokenIdsSeq1.size(),
maxSequenceLength() - tokenIdsSeq2.size() - getNumExtraTokensForSeqPair()
);
}
int secondSequenceLength = min(
tokenIdsSeq2.size(),
maxSequenceLength() - firstSequenceLength - getNumExtraTokensForSeqPair()
);
tokenIdsSeq1 = tokenIdsSeq1.subList(0, firstSequenceLength);
tokenPositionMapSeq1 = tokenPositionMapSeq1.subList(0, firstSequenceLength);
tokenIdsSeq2 = tokenIdsSeq2.subList(0, secondSequenceLength);
tokenPositionMapSeq2 = tokenPositionMapSeq2.subList(0, secondSequenceLength);
}
case NONE -> throw ExceptionsHelper.badRequestException(
"Input too large. The tokenized input length [{}] exceeds the maximum sequence length [{}]",
numTokens,
maxSequenceLength()
);
}
}
if (isTruncated || numTokens < maxSequenceLength()) {// indicates no spanning
return List.of(
createTokensBuilder(clsTokenId(), sepTokenId(), isWithSpecialTokens()).addSequencePair(
tokenIdsSeq1.stream().map(DelimitedToken.Encoded::getEncoding).collect(Collectors.toList()),
tokenPositionMapSeq1,
tokenIdsSeq2.stream().map(DelimitedToken.Encoded::getEncoding).collect(Collectors.toList()),
tokenPositionMapSeq2
).build(List.of(seq1, seq2), isTruncated, List.of(innerResultSeq1.tokens, innerResultSeq2.tokens), -1, sequenceId)
);
}
List<TokenizationResult.Tokens> toReturn = new ArrayList<>();
int splitEndPos = 0;
int splitStartPos = 0;
int spanPrev = -1;
List<Integer> seq1TokenIds = tokenIdsSeq1.stream().map(DelimitedToken.Encoded::getEncoding).collect(Collectors.toList());
final int trueMaxSeqLength = maxSequenceLength() - extraTokens - tokenIdsSeq1.size();
if (trueMaxSeqLength <= 0) {
throw new IllegalArgumentException(
Strings.format(
"Unable to do sequence pair tokenization: the first sequence [%d tokens] "
+ "is longer than the max sequence length [%d tokens]",
tokenIdsSeq1.size() + extraTokens,
maxSequenceLength()
)
);
}
if (span > trueMaxSeqLength) {
throw new IllegalArgumentException(
Strings.format(
"Unable to do sequence pair tokenization: the combined first sequence, span length and delimiting tokens"
+ " [%d + %d + %d = %d tokens] is longer than the max sequence length [%d tokens]."
+ " Reduce the size of the [span] window.",
tokenIdsSeq1.size(),
span,
extraTokens,
tokenIdsSeq1.size() + span + extraTokens,
maxSequenceLength()
)
);
}
while (splitEndPos < tokenIdsSeq2.size()) {
splitEndPos = min(splitStartPos + trueMaxSeqLength, tokenIdsSeq2.size());
// Make sure we do not end on a word
if (splitEndPos != tokenIdsSeq2.size()) {
while (splitEndPos > splitStartPos + 1
&& Objects.equals(tokenPositionMapSeq2.get(splitEndPos), tokenPositionMapSeq2.get(splitEndPos - 1))) {
splitEndPos--;
}
}
toReturn.add(
createTokensBuilder(clsTokenId(), sepTokenId(), isWithSpecialTokens()).addSequencePair(
seq1TokenIds,
tokenPositionMapSeq1,
tokenIdsSeq2.subList(splitStartPos, splitEndPos)
.stream()
.map(DelimitedToken.Encoded::getEncoding)
.collect(Collectors.toList()),
tokenPositionMapSeq2.subList(splitStartPos, splitEndPos)
)
.build(
List.of(seq1, seq2),
false,
List.of(tokenIdsSeq1, tokenIdsSeq2.subList(splitStartPos, splitEndPos)),
spanPrev,
sequenceId
)
);
spanPrev = span;
int prevSplitStart = splitStartPos;
splitStartPos = splitEndPos - span;
if (splitStartPos <= prevSplitStart) {
// Tokenization is not progressing, the start pos has
// not moved forward leading to an infinite loop.
// In practice this is probably due to the span
// setting being very close to the 2nd sequence length
// and the sequence ending in a long word that tokenizes
// to a number of elements greater than the difference
// between span and 2nd sequence length
throw new IllegalStateException(
"Tokenization cannot be satisfied with the current span setting. Consider decreasing the span setting"
);
}
// try to back up our split so that it starts at the first whole word
if (splitStartPos < tokenIdsSeq2.size()) {
while (splitStartPos > (prevSplitStart + 1)
&& Objects.equals(tokenPositionMapSeq2.get(splitStartPos), tokenPositionMapSeq2.get(splitStartPos - 1))) {
splitStartPos--;
spanPrev++;
}
}
}
return toReturn;
}
public abstract NlpTask.RequestBuilder requestBuilder();
public abstract OptionalInt getPadTokenId();
public abstract String getPadToken();
public abstract OptionalInt getMaskTokenId();
public abstract String getMaskToken();
public abstract List<String> getVocabulary();
public int getSpan() {
return -1;
}
abstract TokenizationResult.TokensBuilder createTokensBuilder(int clsTokenId, int sepTokenId, boolean withSpecialTokens);
public abstract InnerTokenization innerTokenize(String seq);
public static NlpTokenizer build(Vocabulary vocabulary, Tokenization params) throws IOException {
ExceptionsHelper.requireNonNull(params, TOKENIZATION);
ExceptionsHelper.requireNonNull(vocabulary, VOCABULARY);
if (params instanceof BertTokenization) {
return BertTokenizer.builder(vocabulary.get(), params).build();
}
if (params instanceof BertJapaneseTokenization) {
return BertJapaneseTokenizer.builder(vocabulary.get(), params).build();
}
if (params instanceof MPNetTokenization) {
return MPNetTokenizer.mpBuilder(vocabulary.get(), params).build();
}
if (params instanceof RobertaTokenization robertaTokenization) {
return RobertaTokenizer.builder(vocabulary.get(), vocabulary.merges(), robertaTokenization).build();
}
if (params instanceof XLMRobertaTokenization xlmRobertaTokenization) {
return XLMRobertaTokenizer.builder(vocabulary.get(), vocabulary.scores(), xlmRobertaTokenization).build();
}
if (params instanceof DebertaV2Tokenization debertaV2Tokenization) {
return DebertaV2Tokenizer.builder(vocabulary.get(), vocabulary.scores(), debertaV2Tokenization).build();
}
throw new IllegalArgumentException("unknown tokenization type [" + params.getName() + "]");
}
public record InnerTokenization(List<? extends DelimitedToken.Encoded> tokens, List<Integer> tokenPositionMap) {}
}
| NlpTokenizer |
java | apache__flink | flink-datastream/src/main/java/org/apache/flink/datastream/impl/extension/window/function/InternalTwoInputWindowStreamProcessFunction.java | {
"start": 2086,
"end": 4793
} | class ____<IN1, IN2, OUT, W extends Window>
implements TwoInputNonBroadcastStreamProcessFunction<IN1, IN2, OUT> {
/** User-defined {@link WindowProcessFunction}. */
private final TwoInputNonBroadcastWindowStreamProcessFunction<IN1, IN2, OUT>
windowProcessFunction;
private final WindowAssigner<TaggedUnion<IN1, IN2>, W> assigner;
private final Trigger<TaggedUnion<IN1, IN2>, W> trigger;
/**
* The allowed lateness for elements. This is used for:
*
* <ul>
* <li>Deciding if an element should be dropped from a window due to lateness.
* <li>Clearing the state of a window if the time out-of the {@code window.maxTimestamp +
* allowedLateness} landmark.
* </ul>
*/
private final long allowedLateness;
private final WindowStrategy windowStrategy;
public InternalTwoInputWindowStreamProcessFunction(
TwoInputNonBroadcastWindowStreamProcessFunction<IN1, IN2, OUT> windowProcessFunction,
WindowAssigner<TaggedUnion<IN1, IN2>, W> assigner,
Trigger<TaggedUnion<IN1, IN2>, W> trigger,
long allowedLateness,
WindowStrategy windowStrategy) {
this.windowProcessFunction = windowProcessFunction;
this.assigner = assigner;
this.trigger = trigger;
this.allowedLateness = allowedLateness;
this.windowStrategy = windowStrategy;
}
@Override
public void processRecordFromFirstInput(
IN1 record, Collector<OUT> output, PartitionedContext<OUT> ctx) throws Exception {
// Do nothing as this will translator to windowOperator instead of processOperator, and this
// method will never be invoked.
}
@Override
public void processRecordFromSecondInput(
IN2 record, Collector<OUT> output, PartitionedContext<OUT> ctx) throws Exception {
// Do nothing as this will translator to windowOperator instead of processOperator, and this
// method will never be invoked.
}
public TwoInputNonBroadcastWindowStreamProcessFunction<IN1, IN2, OUT>
getWindowProcessFunction() {
return windowProcessFunction;
}
@Override
public Set<StateDeclaration> usesStates() {
return windowProcessFunction.usesStates();
}
public WindowAssigner<TaggedUnion<IN1, IN2>, W> getAssigner() {
return assigner;
}
public Trigger<TaggedUnion<IN1, IN2>, W> getTrigger() {
return trigger;
}
public long getAllowedLateness() {
return allowedLateness;
}
public WindowStrategy getWindowStrategy() {
return windowStrategy;
}
}
| InternalTwoInputWindowStreamProcessFunction |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java | {
"start": 3432,
"end": 10988
} | class ____ extends S3AScaleTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(ILoadTestS3ABulkDeleteThrottling.class);
protected static final int THREADS = 20;
public static final int TOTAL_KEYS = 25000;
public static final int SMALL = BULK_DELETE_PAGE_SIZE_DEFAULT;
public static final int SMALL_REQS = TOTAL_KEYS / SMALL;
public static final int MAXIMUM = MAX_ENTRIES_TO_DELETE;
public static final int MAXIMUM_REQS = TOTAL_KEYS / MAXIMUM;
// shared across test cases.
@SuppressWarnings("StaticNonFinalField")
private static boolean testWasThrottled;
private final ExecutorService executor =
HadoopExecutors.newFixedThreadPool(
THREADS,
new ThreadFactoryBuilder()
.setNameFormat("#%d")
.build());
private final CompletionService<Outcome>
completionService = new ExecutorCompletionService<>(executor);
private File dataDir;
private final boolean throttle;
private final int pageSize;
private final int requests;
/**
* Test array for parameterized test runs.
* <ul>
* <li>AWS client throttle on/off</li>
* <li>Page size</li>
* </ul>
*
* @return a list of parameter tuples.
*/
public static Collection<Object[]> params() {
return Arrays.asList(new Object[][]{
{false, SMALL, SMALL_REQS},
{false, MAXIMUM, MAXIMUM_REQS},
{true, SMALL, SMALL_REQS},
{true, MAXIMUM, MAXIMUM_REQS},
});
}
/**
* Parameterized constructor.
* @param throttle AWS client throttle on/off
* @param pageSize Page size
* @param requests request count;
*/
public ILoadTestS3ABulkDeleteThrottling(
final boolean throttle,
final int pageSize,
final int requests) {
this.throttle = throttle;
Preconditions.checkArgument(pageSize > 0,
"page size too low %s", pageSize);
this.pageSize = pageSize;
this.requests = requests;
}
@Override
protected Configuration createScaleConfiguration() {
Configuration conf = super.createScaleConfiguration();
S3ATestUtils.removeBaseAndBucketOverrides(conf,
EXPERIMENTAL_AWS_INTERNAL_THROTTLING,
BULK_DELETE_PAGE_SIZE,
USER_AGENT_PREFIX,
ENABLE_MULTI_DELETE);
conf.setBoolean(EXPERIMENTAL_AWS_INTERNAL_THROTTLING, throttle);
conf.setInt(BULK_DELETE_PAGE_SIZE, pageSize);
conf.set(USER_AGENT_PREFIX,
String.format("ILoadTestS3ABulkDeleteThrottling-%s-%04d",
throttle, pageSize));
S3ATestUtils.disableFilesystemCaching(conf);
return conf;
}
@Override
@BeforeEach
public void setup() throws Exception {
super.setup();
final Configuration conf = getConf();
assumeTrue(conf.getBoolean(ENABLE_MULTI_DELETE, true),
"multipart delete disabled");
dataDir = GenericTestUtils.getTestDir("throttling");
dataDir.mkdirs();
final String size = getFileSystem().getConf().get(BULK_DELETE_PAGE_SIZE);
Assertions.assertThat(size)
.describedAs("page size")
.isNotEmpty();
Assertions.assertThat(getFileSystem().getConf()
.getInt(BULK_DELETE_PAGE_SIZE, -1))
.isEqualTo(pageSize);
}
@Test
public void test_010_Reset() throws Throwable {
testWasThrottled = false;
}
@Test
public void test_020_DeleteThrottling() throws Throwable {
describe("test how S3 reacts to massive multipart deletion requests");
final File results = deleteFiles(requests, pageSize);
LOG.info("Test run completed against {}:\n see {}", getFileSystem(),
results);
if (testWasThrottled) {
LOG.warn("Test was throttled");
} else {
LOG.info("No throttling recorded in filesystem");
}
}
@Test
public void test_030_Sleep() throws Throwable {
maybeSleep();
}
private void maybeSleep() throws InterruptedException, IOException {
if (testWasThrottled) {
LOG.info("Sleeping briefly to let store recover");
Thread.sleep(30_000);
getFileSystem().delete(path("recovery"), true);
testWasThrottled = false;
}
}
/**
* delete files.
* @param requestCount number of requests.
* @throws Exception failure
* @return CSV filename
*/
private File deleteFiles(final int requestCount,
final int entries)
throws Exception {
File csvFile = new File(dataDir,
String.format("delete-%03d-%04d-%s.csv",
requestCount, entries, throttle));
describe("Issuing %d requests of size %d, saving log to %s",
requestCount, entries, csvFile);
Path basePath = path("testDeleteObjectThrottling");
final S3AFileSystem fs = getFileSystem();
final String base = fs.pathToKey(basePath);
final List<ObjectIdentifier> fileList
= buildDeleteRequest(base, entries);
final FileWriter out = new FileWriter(csvFile);
Csvout csvout = new Csvout(out, "\t", "\n");
Outcome.writeSchema(csvout);
final ContractTestUtils.NanoTimer jobTimer =
new ContractTestUtils.NanoTimer();
for (int i = 0; i < requestCount; i++) {
final int id = i;
completionService.submit(() -> {
final long startTime = System.currentTimeMillis();
Thread.currentThread().setName("#" + id);
LOG.info("Issuing request {}", id);
final ContractTestUtils.NanoTimer timer =
new ContractTestUtils.NanoTimer();
Exception ex = null;
try (AuditSpan span = span()) {
fs.removeKeys(fileList, false);
} catch (IOException e) {
ex = e;
}
timer.end("Request " + id);
return new Outcome(id, startTime, timer,
ex);
});
}
NanoTimerStats stats = new NanoTimerStats("Overall");
NanoTimerStats success = new NanoTimerStats("Successful");
NanoTimerStats throttled = new NanoTimerStats("Throttled");
List<Outcome> throttledEvents = new ArrayList<>();
for (int i = 0; i < requestCount; i++) {
Outcome outcome = completionService.take().get();
ContractTestUtils.NanoTimer timer = outcome.timer;
Exception ex = outcome.exception;
outcome.writeln(csvout);
stats.add(timer);
if (ex != null) {
// throttling event occurred.
LOG.info("Throttled at event {}", i, ex);
throttled.add(timer);
throttledEvents.add(outcome);
} else {
success.add(timer);
}
}
csvout.close();
jobTimer.end("Execution of operations");
// now print the stats
LOG.info("Summary file is " + csvFile);
LOG.info("Made {} requests with {} throttle events\n: {}\n{}\n{}",
requestCount,
throttled.getCount(),
stats,
throttled,
success);
double duration = jobTimer.duration();
double iops = requestCount * entries * 1.0e9 / duration;
LOG.info(String.format("TPS %3f operations/second",
iops));
// log at debug
if (LOG.isDebugEnabled()) {
throttledEvents.forEach((outcome -> {
LOG.debug("{}: duration: {}",
outcome.id, outcome.timer.elapsedTimeMs());
}));
}
return csvFile;
}
private List<ObjectIdentifier> buildDeleteRequest(
String base, int count) {
List<ObjectIdentifier> request = new ArrayList<>(count);
for (int i = 0; i < count; i++) {
request.add(ObjectIdentifier.builder().key(
String.format("%s/file-%04d", base, i)).build());
}
return request;
}
/**
* Outcome of one of the load operations.
*/
private static | ILoadTestS3ABulkDeleteThrottling |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/deftyping/TestDefaultForEnums.java | {
"start": 941,
"end": 994
} | class ____<T> {
public T item;
}
| Foo3569 |
java | apache__camel | components/camel-tracing/src/main/java/org/apache/camel/tracing/decorators/AzureStorageDataLakeSpanDecorator.java | {
"start": 1099,
"end": 5645
} | class ____ extends AbstractSpanDecorator {
static final String STORAGE_DATALAKE_DIRECTORY_NAME = "directoryName";
static final String STORAGE_DATALAKE_FILE_NAME = "fileName";
static final String STORAGE_DATALAKE_PATH = "path";
static final String STORAGE_DATALAKE_TIMEOUT = "timeout";
static final String STORAGE_DATALAKE_CONTENT_TYPE = "contentType";
static final String STORAGE_DATALAKE_METADATA = "metadata";
static final String STORAGE_DATALAKE_LAST_MODIFIED = "lastModified";
static final String STORAGE_DATALAKE_POSITION = "position";
static final String STORAGE_DATALAKE_EXPRESSION = "expression";
/**
* Constants copied from {@link org.apache.camel.component.azure.storage.datalake.DataLakeConstants}
*/
static final String OPERATION = "CamelAzureStorageDataLakeOperation";
static final String FILESYSTEM_NAME = "CamelAzureStorageDataLakeFileSystemName";
static final String DIRECTORY_NAME = "CamelAzureStorageDataLakeDirectoryName";
static final String FILE_NAME = "CamelAzureStorageDataLakeFileName";
static final String PATH = "CamelAzureStorageDataLakePath";
static final String TIMEOUT = "CamelAzureStorageDataLakeTimeout";
static final String CONTENT_TYPE = "CamelAzureStorageDataLakeContentType";
static final String METADATA = "CamelAzureStorageDataLakeMetadata";
static final String LAST_MODIFIED = "CamelAzureStorageDataLakeLastModified";
static final String POSITION = "CamelAzureStorageDataLakePosition";
static final String EXPRESSION = "CamelAzureStorageDataLakeExpression";
@Override
public String getComponent() {
return "azure-storage-datalake";
}
@Override
public String getComponentClassName() {
return "org.apache.camel.component.azure.storage.datalake.DataLakeComponent";
}
@Override
public String getOperationName(Exchange exchange, Endpoint endpoint) {
String operation = exchange.getIn().getHeader(OPERATION, String.class);
if (operation == null) {
Map<String, String> queryParameters = toQueryParameters(endpoint.getEndpointUri());
return queryParameters.containsKey("operation")
? queryParameters.get("operation")
: super.getOperationName(exchange, endpoint);
}
return operation;
}
@Override
public void pre(SpanAdapter span, Exchange exchange, Endpoint endpoint) {
super.pre(span, exchange, endpoint);
span.setTag(TagConstants.DB_SYSTEM, getComponent());
String fileSystemName = exchange.getIn().getHeader(FILESYSTEM_NAME, String.class);
if (fileSystemName != null) {
span.setTag(TagConstants.DB_NAME, fileSystemName);
}
String directoryName = exchange.getIn().getHeader(DIRECTORY_NAME, String.class);
if (directoryName != null) {
span.setTag(STORAGE_DATALAKE_DIRECTORY_NAME, directoryName);
}
String fileName = exchange.getIn().getHeader(FILE_NAME, String.class);
if (fileName != null) {
span.setTag(STORAGE_DATALAKE_FILE_NAME, fileName);
}
String path = exchange.getIn().getHeader(PATH, String.class);
if (path != null) {
span.setTag(STORAGE_DATALAKE_PATH, path);
}
Duration timeout = exchange.getIn().getHeader(TIMEOUT, Duration.class);
if (timeout != null) {
span.setTag(STORAGE_DATALAKE_TIMEOUT, timeout.toString());
}
String contentType = exchange.getIn().getHeader(CONTENT_TYPE, String.class);
if (contentType != null) {
span.setTag(STORAGE_DATALAKE_CONTENT_TYPE, contentType);
}
Map metadata = exchange.getIn().getHeader(METADATA, Map.class);
if (metadata != null) {
span.setTag(STORAGE_DATALAKE_METADATA, metadata.toString());
}
OffsetDateTime lastModified = exchange.getIn().getHeader(LAST_MODIFIED, OffsetDateTime.class);
if (lastModified != null) {
span.setTag(STORAGE_DATALAKE_LAST_MODIFIED, lastModified.toString());
}
Long position = exchange.getIn().getHeader(POSITION, Long.class);
if (position != null) {
span.setTag(STORAGE_DATALAKE_POSITION, position);
}
String expression = exchange.getIn().getHeader(EXPRESSION, String.class);
if (expression != null) {
span.setTag(STORAGE_DATALAKE_EXPRESSION, expression);
}
}
}
| AzureStorageDataLakeSpanDecorator |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTestingUtils.java | {
"start": 40504,
"end": 41209
} | class ____ implements SimpleVersionedSerializer<String> {
static final int VERSION = 77;
@Override
public int getVersion() {
return VERSION;
}
@Override
public byte[] serialize(String checkpointData) throws IOException {
return checkpointData.getBytes(StandardCharsets.UTF_8);
}
@Override
public String deserialize(int version, byte[] serialized) throws IOException {
if (version != VERSION) {
throw new IOException("version mismatch");
}
return new String(serialized, StandardCharsets.UTF_8);
}
}
// ----------------- Mock | StringSerializer |
java | dropwizard__dropwizard | dropwizard-health/src/main/java/io/dropwizard/health/DelayedShutdownHandler.java | {
"start": 388,
"end": 1357
} | class ____ extends AbstractLifeCycle {
private static final Logger LOGGER = LoggerFactory.getLogger(DelayedShutdownHandler.class);
private final ShutdownNotifier shutdownNotifier;
public DelayedShutdownHandler(final ShutdownNotifier shutdownNotifier) {
this.shutdownNotifier = shutdownNotifier;
}
public void register() {
try {
start(); // lifecycle must be started in order for stop() to be called
// register the shutdown handler as first (index 0) so that it executes before Jetty's shutdown behavior
ShutdownThread.register(0, this);
} catch (Exception e) {
LOGGER.error("failed setting up delayed shutdown handler", e);
throw new IllegalStateException("failed setting up delayed shutdown handler", e);
}
}
@Override
protected void doStop() throws Exception {
shutdownNotifier.notifyShutdownStarted();
}
}
| DelayedShutdownHandler |
java | elastic__elasticsearch | client/benchmark/src/main/java/org/elasticsearch/client/benchmark/BenchmarkMain.java | {
"start": 664,
"end": 1223
} | class ____ {
@SuppressForbidden(reason = "system out is ok for a command line tool")
public static void main(String[] args) throws Exception {
String type = args[0];
AbstractBenchmark<?> benchmark = switch (type) {
case "rest" -> new RestClientBenchmark();
default -> {
System.err.println("Unknown client type [" + type + "]");
System.exit(1);
yield null;
}
};
benchmark.run(Arrays.copyOfRange(args, 1, args.length));
}
}
| BenchmarkMain |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/util/reflection/FieldInitializerTest.java | {
"start": 6049,
"end": 6772
} | class ____ {
@InjectMocks LocalType field;
}
TheTestWithLocalType testWithLocalType = new TheTestWithLocalType();
// when / then
assertThatThrownBy(
() -> {
new FieldInitializer(
testWithLocalType,
testWithLocalType.getClass().getDeclaredField("field"));
})
.isInstanceOf(MockitoException.class)
.hasMessage("the type 'LocalType' is a local class.");
}
@Test
public void should_not_fail_if_local_type_field_is_instantiated() throws Exception {
// when
| TheTestWithLocalType |
java | apache__dubbo | dubbo-cluster/src/test/java/org/apache/dubbo/rpc/cluster/router/mesh/rule/virtualservice/match/ListBoolMatchTest.java | {
"start": 1087,
"end": 1724
} | class ____ {
@Test
void isMatch() {
ListBoolMatch listBoolMatch = new ListBoolMatch();
List<BoolMatch> oneof = new ArrayList<>();
BoolMatch boolMatch1 = new BoolMatch();
boolMatch1.setExact(true);
oneof.add(boolMatch1);
listBoolMatch.setOneof(oneof);
assertTrue(listBoolMatch.isMatch(true));
assertFalse(listBoolMatch.isMatch(false));
BoolMatch boolMatch2 = new BoolMatch();
boolMatch2.setExact(false);
oneof.add(boolMatch2);
listBoolMatch.setOneof(oneof);
assertTrue(listBoolMatch.isMatch(false));
}
}
| ListBoolMatchTest |
java | redisson__redisson | redisson/src/test/java/org/redisson/RedissonRemoteServiceTest.java | {
"start": 1980,
"end": 2405
} | interface ____ {
RFuture<Void> cancelMethod();
RFuture<Void> voidMethod(String name, Long param);
RFuture<Long> resultMethod(Long value);
RFuture<Void> errorMethod();
RFuture<Void> errorMethodWithCause();
RFuture<Void> timeoutMethod();
}
@RRemoteReactive(RemoteInterface.class)
public | RemoteInterfaceAsync |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/datasource/pooled/PooledDataSourceTest.java | {
"start": 1210,
"end": 5131
} | class ____ {
PooledDataSource dataSource;
@BeforeEach
void beforeEach() {
dataSource = new PooledDataSource("org.hsqldb.jdbcDriver", "jdbc:hsqldb:mem:multipledrivers", "sa", "");
}
@Test
void shouldBlockUntilConnectionIsAvailableInPooledDataSource() throws Exception {
dataSource.setPoolMaximumCheckoutTime(20000);
List<Connection> connections = new ArrayList<>();
CountDownLatch latch = new CountDownLatch(1);
for (int i = 0; i < dataSource.getPoolMaximumActiveConnections(); i++) {
connections.add(dataSource.getConnection());
}
new Thread(() -> {
try {
dataSource.getConnection();
latch.countDown();
} catch (Exception e) {
throw new RuntimeException(e);
}
}).start();
assertFalse(latch.await(1000, TimeUnit.MILLISECONDS));
connections.get(0).close();
assertTrue(latch.await(1000, TimeUnit.MILLISECONDS));
}
@Test
void PoppedConnectionShouldBeNotEqualToClosedConnection() throws Exception {
Connection connectionToClose = dataSource.getConnection();
CountDownLatch latch = new CountDownLatch(1);
new Thread(() -> {
try {
latch.await();
assertNotEquals(connectionToClose, dataSource.getConnection());
} catch (Exception e) {
throw new RuntimeException(e);
}
}).start();
connectionToClose.close();
latch.countDown();
}
@Test
void shouldEnsureCorrectIdleConnectionCount() throws Exception {
dataSource.setPoolMaximumActiveConnections(10);
dataSource.setPoolMaximumIdleConnections(5);
PoolState poolState = dataSource.getPoolState();
List<Connection> connections = new ArrayList<>();
for (int i = 0; i < dataSource.getPoolMaximumActiveConnections(); i++) {
connections.add(dataSource.getConnection());
}
assertEquals(0, poolState.getIdleConnectionCount());
for (int i = 0; i < dataSource.getPoolMaximumActiveConnections(); i++) {
connections.get(i).close();
}
assertEquals(dataSource.getPoolMaximumIdleConnections(), poolState.getIdleConnectionCount());
for (int i = 0; i < dataSource.getPoolMaximumIdleConnections(); i++) {
dataSource.getConnection();
}
assertEquals(0, poolState.getIdleConnectionCount());
}
@Test
void connectionShouldBeAvailableAfterMaximumCheckoutTime() throws Exception {
dataSource.setPoolMaximumCheckoutTime(1000);
dataSource.setPoolTimeToWait(500);
int poolMaximumActiveConnections = dataSource.getPoolMaximumActiveConnections();
CountDownLatch latch = new CountDownLatch(1);
for (int i = 0; i < poolMaximumActiveConnections; i++) {
dataSource.getConnection();
}
new Thread(() -> {
try {
dataSource.getConnection();
latch.countDown();
} catch (Exception e) {
throw new RuntimeException(e);
}
}).start();
assertTrue(latch.await(5000, TimeUnit.MILLISECONDS));
}
@Test
void forceCloseAllShouldRemoveAllActiveAndIdleConnection() throws SQLException {
dataSource.setPoolMaximumActiveConnections(10);
dataSource.setPoolMaximumIdleConnections(5);
PoolState poolState = dataSource.getPoolState();
List<Connection> connections = new ArrayList<>();
for (int i = 0; i < dataSource.getPoolMaximumActiveConnections(); i++) {
connections.add(dataSource.getConnection());
}
for (int i = 0; i < dataSource.getPoolMaximumIdleConnections(); i++) {
connections.get(i).close();
}
assertEquals(dataSource.getPoolMaximumActiveConnections() - poolState.getIdleConnectionCount(),
poolState.getActiveConnectionCount());
assertEquals(dataSource.getPoolMaximumIdleConnections(), poolState.getIdleConnectionCount());
dataSource.forceCloseAll();
assertEquals(0, poolState.getActiveConnectionCount());
assertEquals(0, poolState.getIdleConnectionCount());
}
}
| PooledDataSourceTest |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/protocol/header/GetAllProducerInfoRequestHeader.java | {
"start": 1329,
"end": 1584
} | class ____ implements CommandCustomHeader {
@Override
public void checkFields() throws RemotingCommandException {
// To change body of implemented methods use File | Settings | File
// Templates.
}
}
| GetAllProducerInfoRequestHeader |
java | quarkusio__quarkus | extensions/hibernate-search-orm-outbox-polling/deployment/src/test/java/io/quarkus/hibernate/search/orm/outboxpolling/test/configuration/devmode/HibernateSearchOutboxPollingTestResource.java | {
"start": 5064,
"end": 5686
} | class ____ {
@Id
@GeneratedValue
private Long id;
@FullTextField
@KeywordField(name = "name_sort", sortable = Sortable.YES)
private String name;
Person() {
}
public Person(String name) {
this.name = name;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
public static | Person |
java | apache__thrift | lib/java/src/main/java/org/apache/thrift/server/TThreadPoolServer.java | {
"start": 1949,
"end": 6127
} | class ____ extends AbstractServerArgs<Args> {
public int minWorkerThreads = 5;
public int maxWorkerThreads = Integer.MAX_VALUE;
public ExecutorService executorService;
public int stopTimeoutVal = 60;
public TimeUnit stopTimeoutUnit = TimeUnit.SECONDS;
public Args(TServerTransport transport) {
super(transport);
}
public Args minWorkerThreads(int n) {
minWorkerThreads = n;
return this;
}
public Args maxWorkerThreads(int n) {
maxWorkerThreads = n;
return this;
}
public Args stopTimeoutVal(int n) {
stopTimeoutVal = n;
return this;
}
public Args stopTimeoutUnit(TimeUnit tu) {
stopTimeoutUnit = tu;
return this;
}
public Args executorService(ExecutorService executorService) {
this.executorService = executorService;
return this;
}
}
// Executor service for handling client connections
private final ExecutorService executorService_;
private final TimeUnit stopTimeoutUnit;
private final long stopTimeoutVal;
public TThreadPoolServer(Args args) {
super(args);
stopTimeoutUnit = args.stopTimeoutUnit;
stopTimeoutVal = args.stopTimeoutVal;
executorService_ =
args.executorService != null ? args.executorService : createDefaultExecutorService(args);
}
private static ExecutorService createDefaultExecutorService(Args args) {
return new ThreadPoolExecutor(
args.minWorkerThreads,
args.maxWorkerThreads,
60L,
TimeUnit.SECONDS,
new SynchronousQueue<>(),
new ThreadFactory() {
final AtomicLong count = new AtomicLong();
@Override
public Thread newThread(Runnable r) {
Thread thread = new Thread(r);
thread.setDaemon(true);
thread.setName(
String.format("TThreadPoolServer WorkerProcess-%d", count.getAndIncrement()));
return thread;
}
});
}
protected ExecutorService getExecutorService() {
return executorService_;
}
protected boolean preServe() {
try {
serverTransport_.listen();
} catch (TTransportException ttx) {
LOGGER.error("Error occurred during listening.", ttx);
return false;
}
// Run the preServe event
if (eventHandler_ != null) {
eventHandler_.preServe();
}
stopped_ = false;
setServing(true);
return true;
}
@Override
public void serve() {
if (!preServe()) {
return;
}
execute();
executorService_.shutdownNow();
if (!waitForShutdown()) {
LOGGER.error("Shutdown is not done after " + stopTimeoutVal + stopTimeoutUnit);
}
setServing(false);
}
protected void execute() {
while (!stopped_) {
try {
TTransport client = serverTransport_.accept();
try {
executorService_.execute(new WorkerProcess(client));
} catch (RejectedExecutionException ree) {
if (!stopped_) {
LOGGER.warn(
"ThreadPool is saturated with incoming requests. Closing latest connection.");
}
client.close();
}
} catch (TTransportException ttx) {
if (!stopped_) {
LOGGER.warn("Transport error occurred during acceptance of message", ttx);
}
}
}
}
protected boolean waitForShutdown() {
// Loop until awaitTermination finally does return without a interrupted
// exception. If we don't do this, then we'll shut down prematurely. We want
// to let the executorService clear it's task queue, closing client sockets
// appropriately.
long timeoutMS = stopTimeoutUnit.toMillis(stopTimeoutVal);
long now = System.currentTimeMillis();
while (timeoutMS >= 0) {
try {
return executorService_.awaitTermination(timeoutMS, TimeUnit.MILLISECONDS);
} catch (InterruptedException ix) {
long newnow = System.currentTimeMillis();
timeoutMS -= (newnow - now);
now = newnow;
}
}
return false;
}
@Override
public void stop() {
stopped_ = true;
serverTransport_.interrupt();
}
private | Args |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/allocation/ExistingShardsAllocator.java | {
"start": 1252,
"end": 3736
} | interface ____ {
/**
* Allows plugins to override how we allocate shards that may already exist on disk in the cluster.
*/
Setting<String> EXISTING_SHARDS_ALLOCATOR_SETTING = Setting.simpleString(
"index.allocation.existing_shards_allocator",
GatewayAllocator.ALLOCATOR_NAME,
Setting.Property.IndexScope,
Setting.Property.PrivateIndex
);
/**
* Called before starting a round of allocation, allowing the allocator to invalidate some caches if appropriate.
*/
void beforeAllocation(RoutingAllocation allocation);
/**
* Called during a round of allocation after attempting to allocate all the primaries but before any replicas, allowing the allocator
* to prepare for replica allocation.
*/
void afterPrimariesBeforeReplicas(RoutingAllocation allocation, Predicate<ShardRouting> isRelevantShardPredicate);
/**
* Allocate any unassigned shards in the given {@link RoutingAllocation} for which this {@link ExistingShardsAllocator} is responsible.
*/
void allocateUnassigned(
ShardRouting shardRouting,
RoutingAllocation allocation,
UnassignedAllocationHandler unassignedAllocationHandler
);
/**
* Returns an explanation for a single unassigned shard.
*/
AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation routingAllocation);
/**
* Called when this node becomes the elected master and when it stops being the elected master, so that implementations can clean up any
* in-flight activity from an earlier mastership.
*/
void cleanCaches();
/**
* Called when the given shards have started, so that implementations can invalidate caches and clean up any in-flight activity for
* those shards.
*/
void applyStartedShards(List<ShardRouting> startedShards, RoutingAllocation allocation);
/**
* Called when the given shards have failed, so that implementations can invalidate caches and clean up any in-flight activity for
* those shards.
*/
void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation);
/**
* @return the number of in-flight fetches under this allocator's control.
*/
int getNumberOfInFlightFetches();
/**
* Used by {@link ExistingShardsAllocator#allocateUnassigned} to handle its allocation decisions. A restricted | ExistingShardsAllocator |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/io/stream/GenericNamedWriteable.java | {
"start": 528,
"end": 682
} | interface ____ allows specific NamedWritable objects to be serialized as part of the
* generic serialization in StreamOutput and StreamInput.
*/
public | that |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoder.java | {
"start": 12852,
"end": 13482
} | class ____ extends DecoderException {
private static final long serialVersionUID = -7846841864603865638L;
public NotEnoughDataDecoderException() {
}
public NotEnoughDataDecoderException(String msg) {
super(msg);
}
public NotEnoughDataDecoderException(Throwable cause) {
super(cause);
}
public NotEnoughDataDecoderException(String msg, Throwable cause) {
super(msg, cause);
}
}
/**
* Exception when the body is fully decoded, even if there is still data
*/
public static | NotEnoughDataDecoderException |
java | junit-team__junit5 | junit-platform-commons/src/main/java/org/junit/platform/commons/support/AnnotationSupport.java | {
"start": 16864,
"end": 17810
} | interface ____ which to find the fields; never {@code null}
* @param annotationType the annotation type to search for; never {@code null}
* @param predicate the field filter; never {@code null}
* @return the list of all such fields found; neither {@code null} nor mutable
* @since 1.10
* @see Class#getDeclaredFields()
* @see #findPublicAnnotatedFields(Class, Class, Class)
* @see #findAnnotatedFields(Class, Class, Predicate, HierarchyTraversalMode)
* @see ReflectionSupport#findFields(Class, Predicate, HierarchyTraversalMode)
* @see ReflectionSupport#tryToReadFieldValue(Field, Object)
*/
@API(status = MAINTAINED, since = "1.10")
public static List<Field> findAnnotatedFields(Class<?> clazz, Class<? extends Annotation> annotationType,
Predicate<Field> predicate) {
return AnnotationUtils.findAnnotatedFields(clazz, annotationType, predicate);
}
/**
* Find all distinct {@linkplain Field fields} of the supplied | in |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java | {
"start": 1770,
"end": 4751
} | class ____ {
private static final String BASEDIR = GenericTestUtils
.getTempPath(TestNameNodeHttpServer.class.getSimpleName());
private static String keystoresDir;
private static String sslConfDir;
private static Configuration conf;
private static URLConnectionFactory connectionFactory;
public static Collection<Object[]> policy() {
Object[][] params = new Object[][] { { HttpConfig.Policy.HTTP_ONLY },
{ HttpConfig.Policy.HTTPS_ONLY }, { HttpConfig.Policy.HTTP_AND_HTTPS } };
return Arrays.asList(params);
}
private final HttpConfig.Policy policy;
public TestNameNodeHttpServer(Policy policy) {
super();
this.policy = policy;
}
@BeforeAll
public static void setUp() throws Exception {
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
conf = new Configuration();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestNameNodeHttpServer.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
KeyStoreTestUtil.getClientSSLConfigFileName());
conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
KeyStoreTestUtil.getServerSSLConfigFileName());
}
@AfterAll
public static void tearDown() throws Exception {
FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
}
@Test
public void testHttpPolicy() throws Exception {
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
NameNodeHttpServer server = null;
try {
server = new NameNodeHttpServer(conf, null, addr);
server.start();
assertTrue(implies(policy.isHttpEnabled(),
canAccess("http", server.getHttpAddress())));
assertTrue(implies(!policy.isHttpEnabled(),
server.getHttpAddress() == null));
assertTrue(implies(policy.isHttpsEnabled(),
canAccess("https", server.getHttpsAddress())));
assertTrue(implies(!policy.isHttpsEnabled(), server.getHttpsAddress() == null));
} finally {
if (server != null) {
server.stop();
}
}
}
private static boolean canAccess(String scheme, InetSocketAddress addr) {
if (addr == null)
return false;
try {
URL url = new URL(scheme + "://" + NetUtils.getHostPortString(addr));
URLConnection conn = connectionFactory.openConnection(url);
conn.connect();
conn.getContent();
} catch (Exception e) {
return false;
}
return true;
}
private static boolean implies(boolean a, boolean b) {
return !a || b;
}
}
| TestNameNodeHttpServer |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/aot/hint/predicate/ReflectionHintsPredicates.java | {
"start": 19956,
"end": 21066
} | class ____ implements Predicate<RuntimeHints> {
private final Field field;
FieldHintPredicate(Field field) {
this.field = field;
}
@Override
public boolean test(RuntimeHints runtimeHints) {
TypeHint typeHint = runtimeHints.reflection().getTypeHint(this.field.getDeclaringClass());
if (typeHint == null) {
return false;
}
return memberCategoryMatch(typeHint) || exactMatch(typeHint);
}
@SuppressWarnings("removal")
private boolean memberCategoryMatch(TypeHint typeHint) {
if (Modifier.isPublic(this.field.getModifiers())) {
return typeHint.getMemberCategories().contains(MemberCategory.ACCESS_PUBLIC_FIELDS) ||
typeHint.getMemberCategories().contains(MemberCategory.PUBLIC_FIELDS);
}
else {
return typeHint.getMemberCategories().contains(MemberCategory.ACCESS_DECLARED_FIELDS) ||
typeHint.getMemberCategories().contains(MemberCategory.DECLARED_FIELDS);
}
}
private boolean exactMatch(TypeHint typeHint) {
return typeHint.fields().anyMatch(fieldHint ->
this.field.getName().equals(fieldHint.getName()));
}
}
}
| FieldHintPredicate |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/http/impl/http1x/Http2UpgradeClientConnection.java | {
"start": 7977,
"end": 26292
} | class ____ implements HttpClientStream {
void handleUpgrade(io.vertx.core.http.impl.HttpClientConnection conn, HttpClientStream stream) {
upgradedStream = stream;
upgradedStream.headHandler(headHandler);
upgradedStream.dataHandler(chunkHandler);
upgradedStream.trailersHandler(trailersHandler);
upgradedStream.priorityChangeHandler(priorityHandler);
upgradedStream.exceptionHandler(exceptionHandler);
upgradedStream.resetHandler(resetHandler);
upgradedStream.drainHandler(drainHandler);
upgradedStream.continueHandler(continueHandler);
upgradedStream.earlyHintsHandler(earlyHintsHandler);
upgradedStream.pushHandler(pushHandler);
upgradedStream.customFrameHandler(unknownFrameHandler);
upgradedStream.closeHandler(closeHandler);
upgradingStream.headHandler(null);
upgradingStream.dataHandler(null);
upgradingStream.trailersHandler(null);
upgradingStream.priorityChangeHandler(null);
upgradingStream.exceptionHandler(null);
upgradingStream.drainHandler(null);
upgradingStream.continueHandler(null);
upgradingStream.earlyHintsHandler(null);
upgradingStream.pushHandler(null);
upgradingStream.customFrameHandler(null);
upgradingStream.closeHandler(null);
headHandler = null;
chunkHandler = null;
trailersHandler = null;
priorityHandler = null;
exceptionHandler = null;
resetHandler = null;
drainHandler = null;
continueHandler = null;
earlyHintsHandler = null;
pushHandler = null;
closeHandler = null;
upgradedConnection.current = conn;
conn.closeHandler(upgradedConnection.closeHandler);
conn.exceptionHandler(upgradedConnection.exceptionHandler);
conn.pingHandler(upgradedConnection.pingHandler);
conn.goAwayHandler(upgradedConnection.goAwayHandler);
conn.shutdownHandler(upgradedConnection.shutdownHandler);
conn.remoteSettingsHandler(upgradedConnection.remoteSettingsHandler);
conn.evictionHandler(upgradedConnection.evictionHandler);
conn.concurrencyChangeHandler(upgradedConnection.concurrencyChangeHandler);
conn.invalidMessageHandler(upgradedConnection.invalidMessageHandler);
Handler<Long> concurrencyChangeHandler = upgradedConnection.concurrencyChangeHandler;
upgradedConnection.closeHandler = null;
upgradedConnection.exceptionHandler = null;
upgradedConnection.pingHandler = null;
upgradedConnection.goAwayHandler = null;
upgradedConnection.shutdownHandler = null;
upgradedConnection.remoteSettingsHandler = null;
upgradedConnection.evictionHandler = null;
upgradedConnection.concurrencyChangeHandler = null;
upgradedConnection.invalidMessageHandler = null;
concurrencyChangeHandler.handle(conn.concurrency());
}
private final Http1xClientConnection upgradingConnection;
private final Http2ChannelUpgrade upgrade;
private final HttpClientStream upgradingStream;
private final Http2UpgradeClientConnection upgradedConnection;
private final long maxLifetimeMillis;
private final ClientMetrics<?, ?, ?> metrics;
private HttpClientStream upgradedStream;
private Handler<io.vertx.core.http.impl.HttpResponseHead> headHandler;
private Handler<Buffer> chunkHandler;
private Handler<MultiMap> trailersHandler;
private Handler<StreamPriority> priorityHandler;
private Handler<Throwable> exceptionHandler;
private Handler<Long> resetHandler;
private Handler<Void> drainHandler;
private Handler<Void> continueHandler;
private Handler<MultiMap> earlyHintsHandler;
private Handler<HttpClientPush> pushHandler;
private Handler<HttpFrame> unknownFrameHandler;
private Handler<Void> closeHandler;
UpgradingStream(HttpClientStream stream, Http2UpgradeClientConnection upgradedConnection, long maxLifetimeMillis, ClientMetrics<?, ?, ?> metrics, Http2ChannelUpgrade upgrade, Http1xClientConnection upgradingConnection) {
this.maxLifetimeMillis = maxLifetimeMillis;
this.upgradedConnection = upgradedConnection;
this.upgradingConnection = upgradingConnection;
this.upgradingStream = stream;
this.upgrade = upgrade;
this.metrics = metrics;
}
@Override
public io.vertx.core.http.impl.HttpClientConnection connection() {
return upgradedConnection;
}
/**
* HTTP/2 clear text upgrade here.
*/
@Override
public Future<Void> writeHead(io.vertx.core.http.impl.HttpRequestHead request,
boolean chunked,
Buffer buf,
boolean end,
StreamPriority priority,
boolean connect) {
UpgradeResult blah = new UpgradeResult() {
@Override
public void upgradeAccepted(io.vertx.core.http.impl.HttpClientConnection connection, HttpClientStream upgradedStream) {
UpgradingStream.this.handleUpgrade(connection, upgradedStream);
}
@Override
public void upgradeRejected() {
UpgradingStream.this.upgradedConnection.upgradeProcessed = true;
}
@Override
public void upgradeFailure(Throwable cause) {
upgradingConnection.closeHandler(null);
upgradingConnection.exceptionHandler(null);
upgradingConnection.evictionHandler(null);
upgradingConnection.concurrencyChangeHandler(null);
upgradingConnection.invalidMessageHandler(null);
log.error(cause.getMessage(), cause);
}
};
upgrade.upgrade(upgradingStream, request, buf, end,
upgradingConnection.channelHandlerContext().channel(), maxLifetimeMillis, metrics, blah);
PromiseInternal<Void> promise = upgradingStream.context().promise();
writeHead(request, chunked, buf, end, priority, connect, promise);
return promise.future();
}
private void writeHead(io.vertx.core.http.impl.HttpRequestHead head,
boolean chunked,
Buffer buf,
boolean end,
StreamPriority priority,
boolean connect,
Promise<Void> promise) {
EventExecutor exec = upgradingConnection.channelHandlerContext().executor();
if (exec.inEventLoop()) {
upgradingStream.writeHead(head, chunked, buf, end, priority, connect);
if (end) {
ChannelPipeline pipeline = upgradingConnection.channelHandlerContext().pipeline();
pipeline.fireUserEventTriggered(SEND_BUFFERED_MESSAGES_EVENT);
}
} else {
exec.execute(() -> writeHead(head, chunked, buf, end, priority, connect, promise));
}
}
@Override
public int id() {
return 1;
}
@Override
public Object metric() {
return upgradingStream.metric();
}
@Override
public Object trace() {
return upgradingStream.trace();
}
@Override
public HttpVersion version() {
HttpClientStream s = upgradedStream;
if (s == null) {
s = upgradingStream;
}
return s.version();
}
@Override
public ContextInternal context() {
return upgradingStream.context();
}
@Override
public HttpClientStream continueHandler(Handler<Void> handler) {
if (upgradedStream != null) {
upgradedStream.continueHandler(handler);
} else {
upgradingStream.continueHandler(handler);
continueHandler = handler;
}
return this;
}
@Override
public HttpClientStream earlyHintsHandler(Handler<MultiMap> handler) {
if (upgradedStream != null) {
upgradedStream.earlyHintsHandler(handler);
} else {
upgradingStream.earlyHintsHandler(handler);
earlyHintsHandler = handler;
}
return this;
}
@Override
public HttpClientStream pushHandler(Handler<HttpClientPush> handler) {
if (upgradedStream != null) {
upgradedStream.pushHandler(handler);
} else {
upgradingStream.pushHandler(handler);
pushHandler = handler;
}
return this;
}
@Override
public HttpClientStream closeHandler(Handler<Void> handler) {
if (upgradedStream != null) {
upgradedStream.closeHandler(handler);
} else {
upgradingStream.closeHandler(handler);
closeHandler = handler;
}
return this;
}
@Override
public UpgradingStream drainHandler(Handler<Void> handler) {
if (upgradedStream != null) {
upgradedStream.drainHandler(handler);
} else {
upgradingStream.drainHandler(handler);
drainHandler = handler;
}
return this;
}
@Override
public HttpClientStream resetHandler(Handler<Long> handler) {
if (upgradedStream != null) {
upgradedStream.resetHandler(handler);
} else {
upgradingStream.resetHandler(handler);
resetHandler = handler;
}
return this;
}
@Override
public UpgradingStream exceptionHandler(Handler<Throwable> handler) {
if (upgradedStream != null) {
upgradedStream.exceptionHandler(handler);
} else {
upgradingStream.exceptionHandler(handler);
exceptionHandler = handler;
}
return this;
}
@Override
public HttpClientStream headHandler(Handler<io.vertx.core.http.impl.HttpResponseHead> handler) {
if (upgradedStream != null) {
upgradedStream.headHandler(handler);
} else {
upgradingStream.headHandler(handler);
headHandler = handler;
}
return this;
}
@Override
public HttpClientStream dataHandler(Handler<Buffer> handler) {
if (upgradedStream != null) {
upgradedStream.dataHandler(handler);
} else {
upgradingStream.dataHandler(handler);
chunkHandler = handler;
}
return this;
}
@Override
public HttpClientStream trailersHandler(Handler<MultiMap> handler) {
if (upgradedStream != null) {
upgradedStream.trailersHandler(handler);
} else {
upgradingStream.trailersHandler(handler);
trailersHandler = handler;
}
return this;
}
@Override
public HttpClientStream customFrameHandler(Handler<HttpFrame> handler) {
if (upgradedStream != null) {
upgradedStream.customFrameHandler(handler);
} else {
upgradingStream.customFrameHandler(handler);
unknownFrameHandler = handler;
}
return this;
}
@Override
public HttpClientStream priorityChangeHandler(Handler<StreamPriority> handler) {
if (upgradedStream != null) {
upgradedStream.priorityChangeHandler(handler);
} else {
upgradingStream.priorityChangeHandler(handler);
priorityHandler = handler;
}
return this;
}
@Override
public HttpClientStream setWriteQueueMaxSize(int maxSize) {
if (upgradedStream != null) {
upgradedStream.setWriteQueueMaxSize(maxSize);
} else {
upgradingStream.setWriteQueueMaxSize(maxSize);
}
return this;
}
@Override
public boolean isWritable() {
if (upgradedStream != null) {
return upgradedStream.isWritable();
} else {
return upgradingStream.isWritable();
}
}
@Override
public Future<Void> writeChunk(Buffer buf, boolean end) {
EventExecutor exec = upgradingConnection.channelHandlerContext().executor();
if (exec.inEventLoop()) {
Future<Void> future = upgradingStream.writeChunk(buf, end);
if (end) {
ChannelPipeline pipeline = upgradingConnection.channelHandlerContext().pipeline();
future = future.andThen(ar -> {
if (ar.succeeded()) {
pipeline.fireUserEventTriggered(SEND_BUFFERED_MESSAGES_EVENT);
}
});
}
return future;
} else {
Promise<Void> promise = upgradingStream.context().promise();
exec.execute(() -> {
Future<Void> future = writeChunk(buf, end);
future.onComplete(promise);
});
return promise.future();
}
}
@Override
public Future<Void> writeFrame(int type, int flags, Buffer payload) {
if (upgradedStream != null) {
return upgradedStream.writeFrame(type, flags, payload);
} else {
return upgradingStream.writeFrame(type, flags, payload);
}
}
@Override
public HttpClientStream pause() {
if (upgradedStream != null) {
upgradedStream.pause();
} else {
upgradingStream.pause();
}
return this;
}
@Override
public HttpClientStream fetch(long amount) {
if (upgradedStream != null) {
upgradedStream.fetch(amount);
} else {
upgradingStream.fetch(amount);
}
return this;
}
@Override
public Future<Void> writeReset(long code) {
if (upgradedStream != null) {
return upgradedStream.writeReset(code);
} else {
return upgradingStream.writeReset(code);
}
}
@Override
public StreamPriority priority() {
if (upgradedStream != null) {
return upgradedStream.priority();
} else {
return upgradingStream.priority();
}
}
@Override
public HttpClientStream updatePriority(StreamPriority streamPriority) {
if (upgradedStream != null) {
upgradedStream.updatePriority(streamPriority);
} else {
upgradingStream.updatePriority(streamPriority);
}
return this;
}
}
@Override
public Future<HttpClientStream> createStream(ContextInternal context) {
if (current instanceof Http1xClientConnection && !upgradeProcessed) {
return current
.createStream(context)
.map(stream -> new UpgradingStream(stream, this, maxLifetimeMillis, metrics, upgrade, (Http1xClientConnection) current));
} else {
return current
.createStream(context)
.map(stream -> new DelegatingStream(this, stream));
}
}
@Override
public ContextInternal context() {
return current.context();
}
@Override
public HttpConnection remoteSettingsHandler(Handler<Http2Settings> handler) {
if (current instanceof Http1xClientConnection) {
remoteSettingsHandler = handler;
} else {
current.remoteSettingsHandler(handler);
}
return this;
}
@Override
public HttpConnection pingHandler(@Nullable Handler<Buffer> handler) {
if (current instanceof Http1xClientConnection) {
pingHandler = handler;
} else {
current.pingHandler(handler);
}
return this;
}
@Override
public HttpConnection goAwayHandler(@Nullable Handler<GoAway> handler) {
if (current instanceof Http1xClientConnection) {
goAwayHandler = handler;
} else {
current.goAwayHandler(handler);
}
return this;
}
@Override
public HttpConnection shutdownHandler(@Nullable Handler<Void> handler) {
if (current instanceof Http1xClientConnection) {
shutdownHandler = handler;
} else {
current.shutdownHandler(handler);
}
return this;
}
@Override
public HttpConnection closeHandler(Handler<Void> handler) {
if (current instanceof Http1xClientConnection) {
closeHandler = handler;
}
current.closeHandler(handler);
return this;
}
@Override
public HttpConnection exceptionHandler(Handler<Throwable> handler) {
if (current instanceof Http1xClientConnection) {
exceptionHandler = handler;
}
current.exceptionHandler(handler);
return this;
}
@Override
public io.vertx.core.http.impl.HttpClientConnection evictionHandler(Handler<Void> handler) {
if (current instanceof Http1xClientConnection) {
evictionHandler = handler;
}
current.evictionHandler(handler);
return this;
}
@Override
public io.vertx.core.http.impl.HttpClientConnection invalidMessageHandler(Handler<Object> handler) {
if (current instanceof Http1xClientConnection) {
invalidMessageHandler = handler;
}
current.invalidMessageHandler(handler);
return this;
}
@Override
public io.vertx.core.http.impl.HttpClientConnection concurrencyChangeHandler(Handler<Long> handler) {
if (current instanceof Http1xClientConnection) {
concurrencyChangeHandler = handler;
}
current.concurrencyChangeHandler(handler);
return this;
}
@Override
public HttpConnection goAway(long errorCode, int lastStreamId, Buffer debugData) {
return current.goAway(errorCode, lastStreamId, debugData);
}
@Override
public Future<Void> shutdown(long timeout, TimeUnit unit) {
return current.shutdown(timeout, unit);
}
@Override
public Future<Void> updateSettings(Http2Settings settings) {
return current.updateSettings(settings);
}
@Override
public Http2Settings settings() {
return current.settings();
}
@Override
public Http2Settings remoteSettings() {
return current.remoteSettings();
}
@Override
public Future<Buffer> ping(Buffer data) {
return current.ping(data);
}
@Override
public SocketAddress remoteAddress() {
return current.remoteAddress();
}
@Override
public SocketAddress remoteAddress(boolean real) {
return current.remoteAddress(real);
}
@Override
public SocketAddress localAddress() {
return current.localAddress();
}
@Override
public SocketAddress localAddress(boolean real) {
return current.localAddress(real);
}
@Override
public boolean isSsl() {
return current.isSsl();
}
@Override
public SSLSession sslSession() {
return current.sslSession();
}
@Override
public boolean isValid() {
return current.isValid();
}
@Override
public String indicatedServerName() {
return current.indicatedServerName();
}
@Override
public String toString() {
return getClass().getSimpleName() + "[current=" + current.getClass().getSimpleName() + "]";
}
/**
* The outcome of the upgrade signalled by the upgrade.
*/
public | UpgradingStream |
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 836719,
"end": 838798
} | class ____ extends YamlDeserializerBase<RefErrorHandlerDefinition> {
public RefErrorHandlerDefinitionDeserializer() {
super(RefErrorHandlerDefinition.class);
}
@Override
protected RefErrorHandlerDefinition newInstance() {
return new RefErrorHandlerDefinition();
}
@Override
protected RefErrorHandlerDefinition newInstance(String value) {
return new RefErrorHandlerDefinition(value);
}
@Override
protected boolean setProperty(RefErrorHandlerDefinition target, String propertyKey,
String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "id": {
String val = asText(node);
target.setId(val);
break;
}
case "ref": {
String val = asText(node);
target.setRef(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "ref",
inline = true,
types = org.apache.camel.model.language.RefExpression.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "Ref",
description = "Uses an existing expression from the registry.",
deprecated = false,
properties = {
@YamlProperty(name = "expression", type = "string", required = true, description = "The expression value in your chosen language syntax", displayName = "Expression"),
@YamlProperty(name = "id", type = "string", description = "Sets the id of this node", displayName = "Id"),
@YamlProperty(name = "resultType", type = "string", description = "Sets the | RefErrorHandlerDefinitionDeserializer |
java | quarkusio__quarkus | extensions/security/deployment/src/test/java/io/quarkus/security/test/permissionsallowed/checker/PermissionChecker1stMethodArg.java | {
"start": 243,
"end": 477
} | class ____ extends AbstractNthMethodArgChecker {
@PermissionChecker("1st-arg")
boolean is1stMethodArgOk(Object one, SecurityIdentity identity) {
return this.argsOk(1, one, identity);
}
}
| PermissionChecker1stMethodArg |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/internals/metrics/StreamsClientMetricsDelegatingReporterTest.java | {
"start": 1427,
"end": 5017
} | class ____ {
private MockAdminClient mockAdminClient;
private StreamsClientMetricsDelegatingReporter streamsClientMetricsDelegatingReporter;
private KafkaMetric streamClientMetricOne;
private KafkaMetric streamClientMetricTwo;
private KafkaMetric streamClientMetricThree;
private KafkaMetric kafkaMetricWithThreadIdTag;
private final Object lock = new Object();
private final MetricConfig metricConfig = new MetricConfig();
@BeforeEach
public void setup() {
mockAdminClient = new MockAdminClient();
streamsClientMetricsDelegatingReporter = new StreamsClientMetricsDelegatingReporter(mockAdminClient, "adminClientId");
final Map<String, String> threadIdTagMap = new HashMap<>();
final String threadId = "abcxyz-StreamThread-1";
threadIdTagMap.put("thread-id", threadId);
final MetricName metricNameOne = new MetricName("metricOne", "stream-metrics", "description for metric one", new HashMap<>());
final MetricName metricNameTwo = new MetricName("metricTwo", "stream-metrics", "description for metric two", new HashMap<>());
final MetricName metricNameThree = new MetricName("metricThree", "stream-metrics", "description for metric three", new HashMap<>());
final MetricName metricNameFour = new MetricName("metricThree", "thread-metrics", "description for metric three", threadIdTagMap);
streamClientMetricOne = new KafkaMetric(lock, metricNameOne, (Measurable) (m, now) -> 1.0, metricConfig, Time.SYSTEM);
streamClientMetricTwo = new KafkaMetric(lock, metricNameTwo, (Measurable) (m, now) -> 2.0, metricConfig, Time.SYSTEM);
streamClientMetricThree = new KafkaMetric(lock, metricNameThree, (Measurable) (m, now) -> 3.0, metricConfig, Time.SYSTEM);
kafkaMetricWithThreadIdTag = new KafkaMetric(lock, metricNameFour, (Measurable) (m, now) -> 4.0, metricConfig, Time.SYSTEM);
}
@AfterEach
public void tearDown() {
mockAdminClient.close();
}
@Test
public void shouldInitMetrics() {
final List<KafkaMetric> metrics = Arrays.asList(streamClientMetricOne, streamClientMetricTwo, streamClientMetricThree, kafkaMetricWithThreadIdTag);
streamsClientMetricsDelegatingReporter.init(metrics);
final List<KafkaMetric> expectedMetrics = Arrays.asList(streamClientMetricOne, streamClientMetricTwo, streamClientMetricThree);
assertEquals(expectedMetrics, mockAdminClient.addedMetrics(),
"Should register metrics from init method");
}
@Test
public void shouldRegisterCorrectMetrics() {
streamsClientMetricsDelegatingReporter.metricChange(kafkaMetricWithThreadIdTag);
assertEquals(0, mockAdminClient.addedMetrics().size());
streamsClientMetricsDelegatingReporter.metricChange(streamClientMetricOne);
assertEquals(1, mockAdminClient.addedMetrics().size(),
"Should register client instance metrics only");
}
@Test
public void metricRemoval() {
streamsClientMetricsDelegatingReporter.metricChange(streamClientMetricOne);
streamsClientMetricsDelegatingReporter.metricChange(streamClientMetricTwo);
streamsClientMetricsDelegatingReporter.metricChange(streamClientMetricThree);
assertEquals(3, mockAdminClient.addedMetrics().size());
streamsClientMetricsDelegatingReporter.metricRemoval(streamClientMetricOne);
assertEquals(2, mockAdminClient.addedMetrics().size(),
"Should remove client instance metrics");
}
} | StreamsClientMetricsDelegatingReporterTest |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/junit/DefaultTestFinishedEvent.java | {
"start": 164,
"end": 846
} | class ____ implements TestFinishedEvent {
private final Object testClassInstance;
private final String testMethodName;
private final Throwable testFailure;
public DefaultTestFinishedEvent(
Object testClassInstance, String testMethodName, Throwable testFailure) {
this.testClassInstance = testClassInstance;
this.testMethodName = testMethodName;
this.testFailure = testFailure;
}
@Override
public Throwable getFailure() {
return testFailure;
}
@Override
public String getTestName() {
return testClassInstance.getClass().getSimpleName() + "." + testMethodName;
}
}
| DefaultTestFinishedEvent |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/transport/Transport.java | {
"start": 3445,
"end": 5604
} | interface ____ extends Closeable, RefCounted {
/**
* The node this connection is associated with
*/
DiscoveryNode getNode();
/**
* Sends the request to the node this connection is associated with
* @param requestId see {@link ResponseHandlers#add(TransportResponseHandler, Connection, String)} for details
* @param action the action to execute
* @param request the request to send
* @param options request options to apply
* @throws NodeNotConnectedException if the given node is not connected
*/
void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException,
TransportException;
/**
* The listener will be called when this connection has completed closing. The {@link ActionListener#onResponse(Object)} method
* will be called when the connection closed gracefully, and the {@link ActionListener#onFailure(Exception)} method will be called
* when the connection has successfully closed, but an exception has prompted the close.
*
* @param listener to be called
*/
void addCloseListener(ActionListener<Void> listener);
boolean isClosed();
/**
* Returns the version of the data to communicate in this channel.
*/
TransportVersion getTransportVersion();
/**
* Returns a key that this connection can be cached on. Delegating subclasses must delegate method call to
* the original connection.
*/
default Object getCacheKey() {
return this;
}
@Override
void close();
/**
* Called after this connection is removed from the transport service.
*/
void onRemoved();
/**
* Similar to {@link #addCloseListener} except that these listeners are notified once the connection is removed from the transport
* service.
*/
void addRemovedListener(ActionListener<Void> listener);
}
/**
* This | Connection |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | {
"start": 10168,
"end": 12990
} | class ____ implements AdminHelper.Command {
@Override
public String getName() {
return "-setPolicy";
}
@Override
public String getShortUsage() {
return "[" + getName() +
" -path <path> [-policy <policy>] [-replicate]]\n";
}
@Override
public String getLongUsage() {
TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("<path>", "The path of the file/directory to set " +
"the erasure coding policy");
listing.addRow("<policy>", "The name of the erasure coding policy");
listing.addRow("-replicate",
"force 3x replication scheme on the directory");
return getShortUsage() + "\n" +
"Set the erasure coding policy for a file/directory.\n\n" +
listing.toString() + "\n" +
"-replicate and -policy are optional arguments. They cannot been " +
"used at the same time.\n";
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
final String path = StringUtils.popOptionWithArgument("-path", args);
if (path == null) {
System.err.println("Please specify the path for setting the EC " +
"policy.\nUsage: " + getLongUsage());
return 1;
}
String ecPolicyName = StringUtils.popOptionWithArgument("-policy",
args);
final boolean replicate = StringUtils.popOption("-replicate", args);
if (args.size() > 0) {
System.err.println(getName() + ": Too many arguments");
return 1;
}
if (replicate) {
if (ecPolicyName != null) {
System.err.println(getName() +
": -replicate and -policy cannot been used at the same time");
return 2;
}
ecPolicyName = ErasureCodeConstants.REPLICATION_POLICY_NAME;
}
final Path p = new Path(path);
final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
try {
dfs.setErasureCodingPolicy(p, ecPolicyName);
if (ecPolicyName == null){
ecPolicyName = "default";
}
System.out.println("Set " + ecPolicyName + " erasure coding policy on" +
" " + path);
RemoteIterator<FileStatus> dirIt = dfs.listStatusIterator(p);
if (dirIt.hasNext()) {
System.out.println("Warning: setting erasure coding policy on a " +
"non-empty directory will not automatically convert existing " +
"files to " + ecPolicyName + " erasure coding policy");
}
} catch (Exception e) {
System.err.println(AdminHelper.prettifyException(e));
return 3;
}
return 0;
}
}
/** Command to unset the erasure coding policy set for a file/directory. */
private static | SetECPolicyCommand |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/BadInstanceofTest.java | {
"start": 1563,
"end": 1693
} | class ____ extends A {}
}
""")
.addOutputLines(
"Test.java",
"""
| C |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java | {
"start": 4006,
"end": 23761
} | class ____ extends ESTestCase {
private static final Logger logger = LogManager.getLogger(FileSettingsServiceTests.class);
private Environment env;
private ClusterService clusterService;
private ReservedClusterStateService controller;
private ThreadPool threadpool;
private FileSettingsService fileSettingsService;
private FileSettingsHealthTracker healthIndicatorTracker;
private Path watchedFile;
/**
* We're not testing health info publication here.
*/
public static final FileSettingsHealthIndicatorPublisher NOOP_PUBLISHER = (f, a) -> {};
@Before
public void setUp() throws Exception {
super.setUp();
// TODO remove me once https://github.com/elastic/elasticsearch/issues/115280 is closed
Loggers.setLevel(LogManager.getLogger(AbstractFileWatchingService.class), Level.DEBUG);
threadpool = new TestThreadPool("file_settings_service_tests");
clusterService = new ClusterService(
Settings.builder().put(NODE_NAME_SETTING.getKey(), "test").build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
threadpool,
new TaskManager(Settings.EMPTY, threadpool, Set.of())
);
DiscoveryNode localNode = DiscoveryNodeUtils.create("node");
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId()))
.build();
clusterService.setNodeConnectionsService(mock(NodeConnectionsService.class));
clusterService.getClusterApplierService().setInitialState(clusterState);
clusterService.getMasterService().setClusterStatePublisher((e, pl, al) -> {
ClusterServiceUtils.setAllElapsedMillis(e);
al.onCommit(TimeValue.ZERO);
for (DiscoveryNode node : e.getNewState().nodes()) {
al.onNodeAck(node, null);
}
pl.onResponse(null);
});
clusterService.getMasterService().setClusterStateSupplier(() -> clusterState);
env = newEnvironment(Settings.EMPTY);
Files.createDirectories(env.configDir());
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
controller = spy(
new ReservedClusterStateService(
clusterService,
mock(RerouteService.class),
List.of(new ReservedClusterSettingsAction(clusterSettings)),
List.of()
)
);
healthIndicatorTracker = spy(new FileSettingsHealthTracker(Settings.EMPTY, NOOP_PUBLISHER));
fileSettingsService = spy(new FileSettingsService(clusterService, controller, env, healthIndicatorTracker));
watchedFile = fileSettingsService.watchedFile();
}
@After
public void tearDown() throws Exception {
try {
if (fileSettingsService.lifecycleState() == Lifecycle.State.STARTED) {
logger.info("Stopping file settings service");
fileSettingsService.stop();
}
if (fileSettingsService.lifecycleState() == Lifecycle.State.STOPPED) {
logger.info("Closing file settings service");
fileSettingsService.close();
}
super.tearDown();
clusterService.close();
threadpool.shutdownNow();
} finally {
// TODO remove me once https://github.com/elastic/elasticsearch/issues/115280 is closed
Loggers.setLevel(LogManager.getLogger(AbstractFileWatchingService.class), Level.INFO);
}
}
public void testStartStop() {
fileSettingsService.start();
assertFalse(fileSettingsService.watching());
fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE));
assertTrue(fileSettingsService.watching());
fileSettingsService.stop();
assertFalse(fileSettingsService.watching());
verify(healthIndicatorTracker, times(1)).startOccurred();
verify(healthIndicatorTracker, times(1)).stopOccurred();
}
public void testOperatorDirName() {
Path operatorPath = fileSettingsService.watchedFileDir();
assertTrue(operatorPath.startsWith(env.configDir()));
assertTrue(operatorPath.endsWith("operator"));
Path operatorSettingsFile = fileSettingsService.watchedFile();
assertTrue(operatorSettingsFile.startsWith(operatorPath));
assertTrue(operatorSettingsFile.endsWith("settings.json"));
}
@SuppressWarnings("unchecked")
public void testInitialFileError() throws Exception {
doAnswer(i -> {
((Consumer<Exception>) i.getArgument(3)).accept(new IllegalStateException("Some exception"));
return null;
}).when(controller).process(any(), any(XContentParser.class), eq(randomFrom(ReservedStateVersionCheck.values())), any());
Answer<?> checkExecute = i -> {
i.callRealMethod(); // should throw an exception
fail(i.getMethod().getName() + " should have thrown an exception");
return null;
};
doAnswer(checkExecute).when(fileSettingsService).processInitialFilesMissing();
doAnswer(checkExecute).when(fileSettingsService).processFile(eq(watchedFile), eq(false));
CountDownLatch latch = new CountDownLatch(1);
doAnswer(i -> {
try {
return i.callRealMethod();
} finally {
latch.countDown();
}
}).when(fileSettingsService).processFile(eq(watchedFile), eq(true));
Files.createDirectories(fileSettingsService.watchedFileDir());
// contents of the JSON don't matter, we just need a file to exist
writeTestFile(watchedFile, "{}");
fileSettingsService.start();
fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE));
// wait until the watcher thread has started, and it has discovered the file
assertTrue(latch.await(20, TimeUnit.SECONDS));
// Note: the name "processFileOnServiceStart" is a bit misleading because it is not
// referring to fileSettingsService.start(). Rather, it is referring to the initialization
// of the watcher thread itself, which occurs asynchronously when clusterChanged is first called.
verify(fileSettingsService, times(1)).processFile(eq(watchedFile), eq(true));
verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION), any());
assertEquals(YELLOW, currentHealthIndicatorResult().status());
verify(healthIndicatorTracker, times(1)).changeOccurred();
verify(healthIndicatorTracker, times(1)).failureOccurred(argThat(s -> s.startsWith(IllegalStateException.class.getName())));
}
@SuppressWarnings("unchecked")
public void testInitialFileWorks() throws Exception {
// Let's check that if we didn't throw an error that everything works
doAnswer(i -> {
((Consumer<Exception>) i.getArgument(3)).accept(null);
return null;
}).when(controller).process(any(), any(XContentParser.class), any(), any());
CountDownLatch processFileLatch = new CountDownLatch(1);
Answer<?> checkExecute = i -> {
try {
return i.callRealMethod();
} finally {
processFileLatch.countDown();
}
};
doAnswer(checkExecute).when(fileSettingsService).processFile(eq(watchedFile), eq(true));
Files.createDirectories(fileSettingsService.watchedFileDir());
// contents of the JSON don't matter, we just need a file to exist
writeTestFile(watchedFile, "{}");
fileSettingsService.start();
fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE));
longAwait(processFileLatch);
verify(fileSettingsService, times(1)).processFile(eq(watchedFile), eq(true));
verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION), any());
assertEquals(GREEN, currentHealthIndicatorResult().status());
verify(healthIndicatorTracker, times(1)).changeOccurred();
verify(healthIndicatorTracker, times(1)).successOccurred();
}
@SuppressWarnings("unchecked")
public void testProcessFileChanges() throws Exception {
doAnswer(i -> {
((Consumer<Exception>) i.getArgument(3)).accept(null);
return null;
}).when(controller).process(any(), any(XContentParser.class), any(), any());
// Await on some latches when files change so we can sync up
CountDownLatch processFileCreationLatch = new CountDownLatch(1);
doAnswer(i -> {
try {
return i.callRealMethod();
} finally {
processFileCreationLatch.countDown();
}
}).when(fileSettingsService).processFile(eq(watchedFile), eq(true));
CountDownLatch processFileChangeLatch = new CountDownLatch(1);
doAnswer(i -> {
try {
return i.callRealMethod();
} finally {
processFileChangeLatch.countDown();
}
}).when(fileSettingsService).processFile(eq(watchedFile), eq(false));
Files.createDirectories(fileSettingsService.watchedFileDir());
// contents of the JSON don't matter, we just need a file to exist
writeTestFile(watchedFile, "{}");
// It's important to configure all the mocks before calling start() here,
// because otherwise there can be races between configuration and use of mocks
// which leads to a UnfinishedStubbingException.
fileSettingsService.start();
fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE));
longAwait(processFileCreationLatch);
verify(fileSettingsService, times(1)).processFile(eq(watchedFile), eq(true));
verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION), any());
// Touch the file to get an update
Instant now = LocalDateTime.now(ZoneId.systemDefault()).toInstant(ZoneOffset.ofHours(0));
Files.setLastModifiedTime(watchedFile, FileTime.from(now));
longAwait(processFileChangeLatch);
verify(fileSettingsService, times(1)).processFile(eq(watchedFile), eq(false));
verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_VERSION_ONLY), any());
assertEquals(GREEN, currentHealthIndicatorResult().status());
verify(healthIndicatorTracker, times(2)).changeOccurred();
verify(healthIndicatorTracker, times(2)).successOccurred();
}
    /**
     * Writes syntactically invalid JSON into the watched settings file and verifies that the
     * resulting parse failure is reported through {@code onProcessFileChangesException} (carrying
     * an {@code XContentParseException}), surfaces as a YELLOW health indicator status, and never
     * reaches the controller's {@code process} method.
     */
    public void testInvalidJSON() throws Exception {
        // Chop off the functionality so we don't run too much of the actual cluster logic that we're not testing
        doNothing().when(controller).updateErrorState(any());
        doAnswer(i -> { throw new AssertionError("Parse error should happen before this process method is called"); }).when(controller)
            .process(any(), any(ReservedStateChunk.class), any(), any());
        // Don't really care about the initial state
        Files.createDirectories(fileSettingsService.watchedFileDir());
        doNothing().when(fileSettingsService).processInitialFilesMissing();
        // Prepare to await on a barrier when the file changes so we can sync up
        CyclicBarrier fileChangeBarrier = new CyclicBarrier(2);
        // The watcher thread arrives at the barrier only after the real exception handler has run
        doAnswer((Answer<?>) invocation -> {
            try {
                return invocation.callRealMethod();
            } finally {
                awaitOrBust(fileChangeBarrier);
            }
        }).when(fileSettingsService).onProcessFileChangesException(eq(watchedFile), any());
        // Kick off the service
        fileSettingsService.start();
        fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE));
        // Now break the JSON and wait
        writeTestFile(watchedFile, "test_invalid_JSON");
        awaitOrBust(fileChangeBarrier);
        // These checks use atLeast(1) because the initial JSON is also invalid,
        // and so we sometimes get two calls to these error-reporting methods
        // depending on timing. Rather than trace down the root cause and fix
        // it, we tolerate this for now because, hey, invalid JSON is invalid JSON
        // and this is still testing what we want to test.
        verify(fileSettingsService, Mockito.atLeast(1)).onProcessFileChangesException(
            eq(watchedFile),
            argThat(e -> unwrapException(e) instanceof XContentParseException)
        );
        assertEquals(YELLOW, currentHealthIndicatorResult().status());
        verify(healthIndicatorTracker, Mockito.atLeast(1)).failureOccurred(contains(XContentParseException.class.getName()));
    }
/**
* Looks for the ultimate cause of {@code e} by stripping off layers of bookkeeping exception wrappers.
*/
private Throwable unwrapException(Throwable e) {
while (e != null) {
if (e instanceof ExecutionException || e instanceof IllegalStateException) {
e = e.getCause();
} else {
break;
}
}
return e;
}
private static void awaitOrBust(CyclicBarrier barrier) {
try {
barrier.await(20, TimeUnit.SECONDS);
} catch (InterruptedException | BrokenBarrierException | TimeoutException e) {
throw new AssertionError("Unexpected exception waiting for barrier", e);
}
}
    /**
     * Verifies that {@code stop()} interrupts the watcher thread even while a settings file is
     * still being processed (simulated by a parse that spawns a never-returning thread), that the
     * health indicator reports GREEN once stopped, and that the tracker is told about the stop.
     */
    @SuppressWarnings("unchecked")
    public void testStopWorksInMiddleOfProcessing() throws Exception {
        CountDownLatch processFileLatch = new CountDownLatch(1);
        CountDownLatch deadThreadLatch = new CountDownLatch(1);
        // Parsing signals it has started, then leaves a background thread blocked forever
        // (until deadThreadLatch is released at the very end of the test)
        doAnswer((Answer<ReservedStateChunk>) invocation -> {
            processFileLatch.countDown();
            new Thread(() -> {
                // Simulate a thread that never comes back and decrements the
                // countdown latch in FileSettingsService.processFileSettings
                try {
                    deadThreadLatch.await();
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            }).start();
            return new ReservedStateChunk(Map.of(), new ReservedStateVersion(1L, BuildVersion.current()));
        }).when(controller).parse(any(String.class), any());
        // Complete the initial empty-state initialization immediately
        doAnswer((Answer<Void>) invocation -> {
            invocation.getArgument(1, ActionListener.class).onResponse(null);
            return null;
        }).when(controller).initEmpty(any(String.class), any());
        fileSettingsService.start();
        fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE));
        assertTrue(fileSettingsService.watching());
        Files.createDirectories(fileSettingsService.watchedFileDir());
        // Make some fake settings file to cause the file settings service to process it
        writeTestFile(watchedFile, "{}");
        // Wait until parsing has actually started before stopping the service
        longAwait(processFileLatch);
        // Stopping the service should interrupt the watcher thread, we should be able to stop
        fileSettingsService.stop();
        assertFalse(fileSettingsService.watching());
        fileSettingsService.close();
        // When the service is stopped, the health indicator should be green
        assertEquals(GREEN, currentHealthIndicatorResult().status());
        verify(healthIndicatorTracker).stopOccurred();
        // let the deadlocked thread end, so we can cleanly exit the test
        deadThreadLatch.countDown();
    }
    /**
     * Verifies that handling a snapshot restore which carries file-settings reserved state makes
     * the controller re-initialize an empty reserved state for the file-settings namespace.
     * NOTE(review): the service is never started here (not watching), which is presumably why the
     * state is cleared rather than reset — confirm against FileSettingsService.handleSnapshotRestore.
     */
    public void testHandleSnapshotRestoreClearsMetadata() {
        // Cluster state restored from the snapshot, containing reserved state at version 1
        ClusterState state = ClusterState.builder(clusterService.state())
            .metadata(
                Metadata.builder(clusterService.state().metadata())
                    .put(new ReservedStateMetadata(FileSettingsService.NAMESPACE, 1L, Map.of(), null))
                    .build()
            )
            .build();
        Metadata.Builder metadata = Metadata.builder(state.metadata());
        fileSettingsService.handleSnapshotRestore(state, ClusterState.builder(state), metadata, ProjectId.DEFAULT);
        verify(controller).initEmpty(FileSettingsService.NAMESPACE, ActionListener.noop());
    }
    /**
     * Verifies that when the service is actively watching a settings file, handling a snapshot
     * restore rewrites the file-settings reserved state entry to {@code RESTORED_VERSION}.
     * NOTE(review): presumably this sentinel version lets the local file be reprocessed and
     * reclaim the state after the restore — confirm against FileSettingsService.handleSnapshotRestore.
     */
    public void testHandleSnapshotRestoreResetsMetadata() throws Exception {
        fileSettingsService.start();
        fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE));
        Files.createDirectories(fileSettingsService.watchedFileDir());
        // contents of the JSON don't matter, we just need a file to exist
        writeTestFile(watchedFile, "{}");
        assertTrue(fileSettingsService.watching());
        // Cluster state restored from the snapshot, containing reserved state at version 1
        ClusterState state = ClusterState.builder(clusterService.state())
            .metadata(
                Metadata.builder(clusterService.state().metadata())
                    .put(new ReservedStateMetadata(FileSettingsService.NAMESPACE, 1L, Map.of(), null))
                    .build()
            )
            .build();
        Metadata.Builder metadata = Metadata.builder();
        fileSettingsService.handleSnapshotRestore(state, ClusterState.builder(state), metadata, ProjectId.DEFAULT);
        assertThat(
            metadata.build().reservedStateMetadata(),
            hasEntry(
                FileSettingsService.NAMESPACE,
                new ReservedStateMetadata(FileSettingsService.NAMESPACE, ReservedStateMetadata.RESTORED_VERSION, Map.of(), null)
            )
        );
    }
// helpers
private static void writeTestFile(Path path, String contents) throws IOException {
logger.info("Writing settings file under [{}]", path.toAbsolutePath());
Path tempFilePath = createTempFile();
Files.writeString(tempFilePath, contents);
try {
Files.move(tempFilePath, path, REPLACE_EXISTING, ATOMIC_MOVE);
} catch (AtomicMoveNotSupportedException e) {
logger.info("Atomic move not available. Falling back on non-atomic move to write [{}]", path.toAbsolutePath());
Files.move(tempFilePath, path, REPLACE_EXISTING);
}
}
    /**
     * Waits up to 20 seconds for the latch to reach zero, failing the test on timeout or
     * interruption. The generous timeout accounts for watcher service differences between OSes:
     * on MacOS it may take up to 10 seconds for the Java watcher service to notice the file,
     * on Linux it is instantaneous. Windows is instantaneous too.
     */
    private static void longAwait(CountDownLatch latch) {
        try {
            assertTrue("longAwait: CountDownLatch did not reach zero within the timeout", latch.await(20, TimeUnit.SECONDS));
        } catch (InterruptedException e) {
            // Restore the interrupt flag before failing so up-stack handlers can still see it
            Thread.currentThread().interrupt();
            fail(e, "longAwait: interrupted waiting for CountDownLatch to reach zero");
        }
    }
    /** Computes a fresh health indicator result from the tracker's latest recorded info. */
    private HealthIndicatorResult currentHealthIndicatorResult() {
        return new FileSettingsService.FileSettingsHealthIndicatorService().calculate(healthIndicatorTracker.getCurrentInfo());
    }
}
| FileSettingsServiceTests |
java | apache__flink | flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherManager.java | {
"start": 2763,
"end": 13650
/**
 * Manages the life cycle of a set of {@link SplitFetcher}s: creating them on demand, running them
 * on an internal thread pool, shutting idle ones down, and funneling both fetched records and
 * uncaught fetcher errors back to the main reader thread through the element queue.
 */
class ____<E, SplitT extends SourceSplit> {
    private static final Logger LOG = LoggerFactory.getLogger(SplitFetcherManager.class);
    /** Name prefix for fetcher threads; the owning task's thread name is appended to it. */
    static final String THREAD_NAME_PREFIX = "Source Data Fetcher for ";
    /** Callback invoked when a fetcher hits an uncaught exception; set up in the constructor. */
    private final Consumer<Throwable> errorHandler;
    /** An atomic integer to generate monotonically increasing fetcher ids. */
    private final AtomicInteger fetcherIdGenerator;
    /** A supplier to provide split readers. */
    private final Supplier<SplitReader<E, SplitT>> splitReaderFactory;
    /** Uncaught exception in the split fetchers. */
    private final AtomicReference<Throwable> uncaughtFetcherException;
    /** The element queue that the split fetchers will put elements into. */
    private final FutureCompletingBlockingQueue<RecordsWithSplitIds<E>> elementsQueue;
    /** A map keeping track of all the split fetchers. */
    protected final Map<Integer, SplitFetcher<E, SplitT>> fetchers;
    /**
     * Tracks the total number of fetcher threads that need to be cleaned up when the
     * SplitFetcherManager shuts down. It is different from the fetchers map, as the map only
     * contains alive fetchers, but not shutting-down fetchers.
     */
    private final AtomicInteger fetchersToShutDown;
    /**
     * The executor service running the fetchers. NOTE(review): this doc used to say "two
     * threads", but the constructor creates a cached thread pool with one thread per submitted
     * fetcher — confirm and reconcile.
     */
    private final ExecutorService executors;
    /** Indicating the split fetcher manager has closed or not. */
    private volatile boolean closed;
    /**
     * Hook for handling finished splits in {@link SplitFetcher}, usually used for testing split
     * finishing behavior of {@link SplitFetcher} and {@link SplitReader}.
     */
    private final Consumer<Collection<String>> splitFinishedHook;
    /** Whether unaligned source splits are allowed; read from ALLOW_UNALIGNED_SOURCE_SPLITS. */
    private final boolean allowUnalignedSourceSplits;
/**
* Create a split fetcher manager.
*
* @param splitReaderFactory a supplier that could be used to create split readers.
* @param configuration the configuration of this fetcher manager.
*/
public SplitFetcherManager(
Supplier<SplitReader<E, SplitT>> splitReaderFactory, Configuration configuration) {
this(splitReaderFactory, configuration, (ignore) -> {});
}
/**
* Create a split fetcher manager.
*
* @param splitReaderFactory a supplier that could be used to create split readers.
* @param configuration the configuration of this fetcher manager.
* @param splitFinishedHook Hook for handling finished splits in split fetchers.
*/
public SplitFetcherManager(
Supplier<SplitReader<E, SplitT>> splitReaderFactory,
Configuration configuration,
Consumer<Collection<String>> splitFinishedHook) {
this.elementsQueue =
new FutureCompletingBlockingQueue<>(
configuration.get(SourceReaderOptions.ELEMENT_QUEUE_CAPACITY));
this.errorHandler =
new Consumer<Throwable>() {
@Override
public void accept(Throwable t) {
LOG.error("Received uncaught exception.", t);
if (!uncaughtFetcherException.compareAndSet(null, t)) {
// Add the exception to the exception list.
uncaughtFetcherException.get().addSuppressed(t);
}
// Wake up the main thread to let it know the exception.
elementsQueue.notifyAvailable();
}
};
this.splitReaderFactory = splitReaderFactory;
this.splitFinishedHook = splitFinishedHook;
this.uncaughtFetcherException = new AtomicReference<>(null);
this.fetcherIdGenerator = new AtomicInteger(0);
this.fetchers = new ConcurrentHashMap<>();
this.allowUnalignedSourceSplits = configuration.get(ALLOW_UNALIGNED_SOURCE_SPLITS);
this.fetchersToShutDown = new AtomicInteger(0);
// Create the executor with a thread factory that fails the source reader if one of
// the fetcher thread exits abnormally.
final String taskThreadName = Thread.currentThread().getName();
this.executors =
Executors.newCachedThreadPool(
r -> new Thread(r, THREAD_NAME_PREFIX + taskThreadName));
this.closed = false;
}
    /**
     * Hands the given splits over to fetchers for reading. Implementations decide how the splits
     * are distributed across (possibly newly created) fetchers.
     *
     * @param splitsToAdd the splits to start reading.
     */
    public abstract void addSplits(List<SplitT> splitsToAdd);
    /**
     * Stops reading the given splits. How in-flight data for removed splits is handled is
     * implementation-specific.
     *
     * @param splitsToRemove the splits to stop reading.
     */
    public abstract void removeSplits(List<SplitT> splitsToRemove);
public void pauseOrResumeSplits(
Collection<String> splitIdsToPause, Collection<String> splitIdsToResume) {
for (SplitFetcher<E, SplitT> fetcher : fetchers.values()) {
Map<String, SplitT> idToSplit = fetcher.assignedSplits();
List<SplitT> splitsToPause = lookupInAssignment(splitIdsToPause, idToSplit);
List<SplitT> splitsToResume = lookupInAssignment(splitIdsToResume, idToSplit);
if (!splitsToPause.isEmpty() || !splitsToResume.isEmpty()) {
fetcher.pauseOrResumeSplits(splitsToPause, splitsToResume);
}
}
}
private List<SplitT> lookupInAssignment(
Collection<String> splitIds, Map<String, SplitT> assignment) {
List<SplitT> splits = new ArrayList<>();
for (String s : splitIds) {
SplitT split = assignment.get(s);
if (split != null) {
splits.add(split);
}
}
return splits;
}
    /** Submits the given fetcher's run loop to the internal executor, starting it asynchronously. */
    protected void startFetcher(SplitFetcher<E, SplitT> fetcher) {
        executors.submit(fetcher);
    }
    /**
     * Synchronized method to ensure no fetcher is created after the split fetcher manager has
     * closed (close() is synchronized on the same monitor).
     *
     * @return the created split fetcher.
     * @throws IllegalStateException if the split fetcher manager has closed.
     */
    protected synchronized SplitFetcher<E, SplitT> createSplitFetcher() {
        if (closed) {
            throw new IllegalStateException("The split fetcher manager has closed.");
        }
        // Create SplitReader.
        SplitReader<E, SplitT> splitReader = splitReaderFactory.get();
        int fetcherId = fetcherIdGenerator.getAndIncrement();
        // Count this fetcher for shutdown bookkeeping before it can possibly start running;
        // the matching decrement happens in the shutdown callback below.
        fetchersToShutDown.incrementAndGet();
        SplitFetcher<E, SplitT> splitFetcher =
                new SplitFetcher<>(
                        fetcherId,
                        elementsQueue,
                        splitReader,
                        errorHandler,
                        () -> {
                            fetchers.remove(fetcherId);
                            fetchersToShutDown.decrementAndGet();
                            // We need this to synchronize the status of fetchers to concurrent
                            // partners, as ConcurrentHashMap's aggregate status methods
                            // (size, isEmpty, containsValue) are not designed for program
                            // control.
                            elementsQueue.notifyAvailable();
                        },
                        this.splitFinishedHook,
                        allowUnalignedSourceSplits);
        fetchers.put(fetcherId, splitFetcher);
        return splitFetcher;
    }
/**
* Check and shutdown the fetchers that have completed their work.
*
* @return true if all the fetchers have completed the work, false otherwise.
*/
public boolean maybeShutdownFinishedFetchers() {
Iterator<Map.Entry<Integer, SplitFetcher<E, SplitT>>> iter = fetchers.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<Integer, SplitFetcher<E, SplitT>> entry = iter.next();
SplitFetcher<E, SplitT> fetcher = entry.getValue();
if (fetcher.isIdle()) {
LOG.info("Closing splitFetcher {} because it is idle.", entry.getKey());
fetcher.shutdown(true);
iter.remove();
}
}
return fetchers.isEmpty();
}
    /**
     * Returns the queue containing data produced by the split fetchers. This method is internal
     * and only used in {@link SourceReaderBase}.
     */
    @Internal
    public FutureCompletingBlockingQueue<RecordsWithSplitIds<E>> getQueue() {
        return elementsQueue;
    }
    /**
     * Close the split fetcher manager.
     *
     * @param timeoutMs the max time in milliseconds to wait.
     * @throws Exception when failed to close the split fetcher manager.
     */
    public synchronized void close(long timeoutMs) throws Exception {
        final long startTime = System.currentTimeMillis();
        // Rejects any further createSplitFetcher() calls (that method is synchronized on this
        // and checks the flag).
        closed = true;
        fetchers.values().forEach(SplitFetcher::shutdown);
        // Actively drain the element queue in case there are previously shutting down
        // fetcher threads blocking on putting batches into the element queue.
        executors.submit(
                () -> {
                    long timeElapsed = System.currentTimeMillis() - startTime;
                    // Keep recycling batches until every fetcher (including ones already removed
                    // from the map but not yet terminated) has finished, or the budget runs out.
                    while (fetchersToShutDown.get() > 0 && timeElapsed < timeoutMs) {
                        try {
                            elementsQueue
                                    .getAvailabilityFuture()
                                    .thenRun(() -> elementsQueue.poll().recycle())
                                    .get(timeoutMs - timeElapsed, TimeUnit.MILLISECONDS);
                        } catch (ExecutionException ee) {
                            // Ignore the exception and continue.
                        } catch (Exception e) {
                            LOG.warn(
                                    "Received exception when waiting for the fetchers to "
                                            + "shutdown.",
                                    e);
                            break;
                        }
                        timeElapsed = System.currentTimeMillis() - startTime;
                    }
                });
        // No new tasks accepted from here on; in-flight fetcher runnables and the drain task
        // above are allowed to finish within the remaining budget.
        executors.shutdown();
        long timeElapsed = System.currentTimeMillis() - startTime;
        if (!executors.awaitTermination(timeoutMs - timeElapsed, TimeUnit.MILLISECONDS)) {
            LOG.warn(
                    "Failed to close the split fetchers in {} ms. There are still {} split fetchers running",
                    timeoutMs,
                    fetchersToShutDown.get());
        }
    }
public void checkErrors() {
if (uncaughtFetcherException.get() != null) {
throw new RuntimeException(
"One or more fetchers have encountered exception",
uncaughtFetcherException.get());
}
}
    // -----------------------
    /** Returns the number of fetchers currently alive, i.e. still present in the fetchers map. */
    @VisibleForTesting
    public int getNumAliveFetchers() {
        return fetchers.size();
    }
}
| SplitFetcherManager |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/handler/HandlerMappingIntrospectorTests.java | {
"start": 15999,
"end": 16336
class ____ implements CorsConfigurationSource {
	// The fixed CORS configuration handed back for every request.
	private final CorsConfiguration corsConfig;
	private TestHandler(CorsConfiguration corsConfig) {
		this.corsConfig = corsConfig;
	}
	// Always returns the configuration supplied at construction, regardless of the request.
	@Override
	public CorsConfiguration getCorsConfiguration(HttpServletRequest request) {
		return this.corsConfig;
	}
}
private static | TestHandler |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.