language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/ConfigTestUtils.java | {
"start": 688,
"end": 1425
} | class ____ {
// @formatter:off
public static final String AUTH_PROVIDER_XML = "<authentication-manager alias='authManager'>"
+ " <authentication-provider>"
+ " <user-service id='us'>"
+ " <user name='bob' password='{noop}bobspassword' authorities='ROLE_A,ROLE_B' />"
+ " <user name='bill' password='{noop}billspassword' authorities='ROLE_A,ROLE_B,AUTH_OTHER' />"
+ " <user name='admin' password='{noop}password' authorities='ROLE_ADMIN,ROLE_USER' />"
+ " <user name='user' password='{noop}password' authorities='ROLE_USER' />"
+ " </user-service>"
+ " </authentication-provider>"
+ "</authentication-manager>";
// @formatter:on
}
| ConfigTestUtils |
java | netty__netty | common/src/main/java/io/netty/util/internal/svm/RefCntSubstitution.java | {
"start": 900,
"end": 1197
} | class ____ {
private RefCntSubstitution() {
}
@Alias
@RecomputeFieldValue(
kind = RecomputeFieldValue.Kind.FieldOffset,
declClassName = "io.netty.util.internal.RefCnt",
name = "value"
)
public static long VALUE_OFFSET;
}
| RefCntSubstitution |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/context/support/WebApplicationContextUtils.java | {
"start": 16345,
"end": 16740
} | class ____ implements ObjectFactory<WebRequest>, Serializable {
@Override
public WebRequest getObject() {
ServletRequestAttributes requestAttr = currentRequestAttributes();
return new ServletWebRequest(requestAttr.getRequest(), requestAttr.getResponse());
}
@Override
public String toString() {
return "Current ServletWebRequest";
}
}
/**
* Inner | WebRequestObjectFactory |
java | elastic__elasticsearch | x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/ReindexChallengeRestIT.java | {
"start": 620,
"end": 2325
} | class ____ extends StandardVersusLogsIndexModeChallengeRestIT {
@Override
public void indexDocuments(
final CheckedSupplier<List<XContentBuilder>, IOException> baselineSupplier,
final CheckedSupplier<List<XContentBuilder>, IOException> contencontenderSupplierderSupplier
) throws IOException {
indexBaselineDocuments(baselineSupplier);
indexContenderDocuments();
}
private void indexBaselineDocuments(final CheckedSupplier<List<XContentBuilder>, IOException> documentsSupplier) throws IOException {
final StringBuilder sb = new StringBuilder();
int id = 0;
for (var document : documentsSupplier.get()) {
sb.append(Strings.format("{ \"create\": { \"_id\" : \"%d\" } }\n", id));
sb.append(Strings.toString(document)).append("\n");
id++;
}
performBulkRequest(sb.toString(), true);
}
private void indexContenderDocuments() throws IOException {
var reindexRequest = new Request("POST", "/_reindex?refresh=true");
reindexRequest.setJsonEntity(String.format(Locale.ROOT, """
{
"source": {
"index": "%s"
},
"dest": {
"index": "%s",
"op_type": "create"
}
}
""", getBaselineDataStreamName(), getContenderDataStreamName()));
var response = client.performRequest(reindexRequest);
assertOK(response);
var body = entityAsMap(response);
assertThat("encountered failures when performing reindex:\n " + body, body.get("failures"), equalTo(List.of()));
}
}
| ReindexChallengeRestIT |
java | apache__spark | sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java | {
"start": 12294,
"end": 12764
} | class ____ implements ParquetRowGroupReader {
private final ParquetFileReader reader;
ParquetRowGroupReaderImpl(ParquetFileReader reader) {
this.reader = reader;
}
@Override
public PageReadStore readNextRowGroup() throws IOException {
return reader.readNextFilteredRowGroup();
}
@Override
public void close() throws IOException {
if (reader != null) {
reader.close();
}
}
}
}
| ParquetRowGroupReaderImpl |
java | alibaba__fastjson | src/main/java/com/alibaba/fastjson/JSONPath.java | {
"start": 95866,
"end": 96857
} | class ____ implements Segment {
private final int index;
public ArrayAccessSegment(int index){
this.index = index;
}
public Object eval(JSONPath path, Object rootObject, Object currentObject) {
return path.getArrayItem(currentObject, index);
}
public boolean setValue(JSONPath path, Object currentObject, Object value) {
return path.setArrayItem(path, currentObject, index, value);
}
public boolean remove(JSONPath path, Object currentObject) {
return path.removeArrayItem(path, currentObject, index);
}
public void extract(JSONPath path, DefaultJSONParser parser, Context context) {
JSONLexerBase lexer = (JSONLexerBase) parser.lexer;
if (lexer.seekArrayToItem(index)
&& context.eval)
{
context.object = parser.parse();
}
}
}
static | ArrayAccessSegment |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StartSLMAction.java | {
"start": 618,
"end": 899
} | class ____ extends ActionType<AcknowledgedResponse> {
public static final StartSLMAction INSTANCE = new StartSLMAction();
public static final String NAME = "cluster:admin/slm/start";
protected StartSLMAction() {
super(NAME);
}
public static | StartSLMAction |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ccm/CCMModelTests.java | {
"start": 932,
"end": 2771
} | class ____ extends AbstractBWCWireSerializationTestCase<CCMModel> {
public void testToXContent() throws IOException {
var model = new CCMModel(new SecureString("secret".toCharArray()));
var builder = XContentFactory.contentBuilder(XContentType.JSON);
model.toXContent(builder, null);
assertThat(Strings.toString(builder), is(XContentHelper.stripWhitespace("""
{
"api_key": "secret"
}
""")));
}
public void testFromXContentBytes() throws IOException {
String json = """
{
"api_key": "test_key"
}
""";
var model = CCMModel.fromXContentBytes(new BytesArray(json));
assertThat(model.apiKey().toString(), is("test_key"));
}
public void testFromXContentBytes_ThrowsException_WhenApiKeyMissing() {
String json = """
{
}
""";
var exception = expectThrows(IllegalArgumentException.class, () -> CCMModel.fromXContentBytes(new BytesArray(json)));
assertThat(exception.getMessage(), containsString("Required [api_key]"));
}
@Override
protected CCMModel mutateInstanceForVersion(CCMModel instance, TransportVersion version) {
return instance;
}
@Override
protected Writeable.Reader<CCMModel> instanceReader() {
return CCMModel::new;
}
@Override
protected CCMModel createTestInstance() {
return new CCMModel(new SecureString(randomAlphaOfLength(10).toCharArray()));
}
@Override
protected CCMModel mutateInstance(CCMModel instance) throws IOException {
var originalString = instance.apiKey().toString();
return new CCMModel(new SecureString((originalString + "modified").toCharArray()));
}
}
| CCMModelTests |
java | grpc__grpc-java | interop-testing/src/main/java/io/grpc/testing/integration/AbstractInteropTest.java | {
"start": 86406,
"end": 87438
} | class ____<T> implements MethodDescriptor.Marshaller<T> {
private final MethodDescriptor.Marshaller<T> delegate;
volatile int lastOutSize;
volatile int lastInSize;
ByteSizeMarshaller(MethodDescriptor.Marshaller<T> delegate) {
this.delegate = delegate;
}
@Override
public InputStream stream(T value) {
InputStream is = delegate.stream(value);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try {
lastOutSize = (int) ByteStreams.copy(is, baos);
} catch (IOException e) {
throw new RuntimeException(e);
}
return new ByteArrayInputStream(baos.toByteArray());
}
@Override
public T parse(InputStream stream) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try {
lastInSize = (int) ByteStreams.copy(stream, baos);
} catch (IOException e) {
throw new RuntimeException(e);
}
return delegate.parse(new ByteArrayInputStream(baos.toByteArray()));
}
}
}
| ByteSizeMarshaller |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/scripting/xmltags/StaticTextSqlNode.java | {
"start": 736,
"end": 1014
} | class ____ implements SqlNode {
private final String text;
public StaticTextSqlNode(String text) {
this.text = text;
}
@Override
public boolean apply(DynamicContext context) {
context.appendSql(context.parseParam(text));
return true;
}
}
| StaticTextSqlNode |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/enum_/EnumFieldsTest4.java | {
"start": 565,
"end": 634
} | class ____ {
public Type[] types;
}
private static | Model |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/language/VariableExpression.java | {
"start": 1273,
"end": 1955
} | class ____ extends ExpressionDefinition {
public VariableExpression() {
}
protected VariableExpression(VariableExpression source) {
super(source);
}
public VariableExpression(String expression) {
super(expression);
}
private VariableExpression(Builder builder) {
super(builder);
}
@Override
public VariableExpression copyDefinition() {
return new VariableExpression(this);
}
@Override
public String getLanguage() {
return "variable";
}
/**
* {@code Builder} is a specific builder for {@link VariableExpression}.
*/
@XmlTransient
public static | VariableExpression |
java | google__dagger | javatests/dagger/internal/codegen/MapKeyProcessorTest.java | {
"start": 2258,
"end": 3222
} | enum ____ {",
" ADMIN,",
" LOGIN;",
"}");
CompilerTests.daggerCompiler(enumKeyFile, pathEnumFile)
.withAdditionalJavacProcessors(new AutoAnnotationProcessor())
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
// TODO(b/264464791): There is no AutoAnnotationProcessor for KSP.
assume().that(CompilerTests.backend(subject)).isNotEqualTo(Backend.KSP);
subject.hasErrorCount(0);
subject.generatedSource(goldenFileRule.goldenSource("test/PathKeyCreator"));
});
}
@Test
public void nestedMapKeyCreatorFile() {
Source enumKeyFile = CompilerTests.javaSource("test.Container",
"package test;",
"import dagger.MapKey;",
"import java.lang.annotation.Retention;",
"import static java.lang.annotation.RetentionPolicy.RUNTIME;",
"",
"public | PathEnum |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstDoubleEvaluator.java | {
"start": 2824,
"end": 3317
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final EvalOperator.ExpressionEvaluator.Factory field;
public Factory(EvalOperator.ExpressionEvaluator.Factory field) {
this.field = field;
}
@Override
public MvFirstDoubleEvaluator get(DriverContext context) {
return new MvFirstDoubleEvaluator(field.get(context), context);
}
@Override
public String toString() {
return "MvFirst[field=" + field + "]";
}
}
}
| Factory |
java | quarkusio__quarkus | integration-tests/virtual-threads/virtual-threads-disabled/src/main/java/io/quarkus/virtual/disabled/Filters.java | {
"start": 446,
"end": 1423
} | class ____ {
@ServerRequestFilter(nonBlocking = true)
public void request(ContainerRequestContext requestContext) {
if (requestContext.getUriInfo().getPath().contains("/filter")) {
VirtualThreadsAssertions.assertWorkerOrEventLoopThread();
MDC.put("mdc", "test");
CDI.current().select(Counter.class).get().increment();
Vertx.currentContext().putLocal("filter", "test");
}
}
@ServerResponseFilter
public void getFilter(ContainerResponseContext responseContext) {
if (responseContext.getHeaders().get("X-filter") != null) {
VirtualThreadsAssertions.assertWorkerOrEventLoopThread();
// the request filter, the method, and here.
assert CDI.current().select(Counter.class).get().increment() == 3;
assert Vertx.currentContext().getLocal("test").equals("test test");
assert MDC.get("mdc").equals("test test");
}
}
}
| Filters |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/filter/SkipNulls4441Test.java | {
"start": 572,
"end": 1035
} | class ____ {
@JsonSetter(nulls = Nulls.SKIP)
private final List<Inner> listInner = new ArrayList<>();
private final String field1;
@JsonCreator
public Middle(@JsonProperty("field1") String field1) {
this.field1 = field1;
}
public List<Inner> getListInner() {
return listInner;
}
public String getField1() {
return field1;
}
}
static | Middle |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/mixed/FlowableSwitchMapMaybeTest.java | {
"start": 1393,
"end": 20724
} | class ____ extends RxJavaTest {
@Test
public void simple() {
Flowable.range(1, 5)
.switchMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
return Maybe.just(v);
}
})
.test()
.assertResult(1, 2, 3, 4, 5);
}
@Test
public void simpleEmpty() {
Flowable.range(1, 5)
.switchMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
return Maybe.empty();
}
})
.test()
.assertResult();
}
@Test
public void simpleMixed() {
Flowable.range(1, 10)
.switchMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
if (v % 2 == 0) {
return Maybe.just(v);
}
return Maybe.empty();
}
})
.test()
.assertResult(2, 4, 6, 8, 10);
}
@Test
public void backpressured() {
TestSubscriber<Integer> ts = Flowable.range(1, 1024)
.switchMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
if (v % 2 == 0) {
return Maybe.just(v);
}
return Maybe.empty();
}
})
.test(0L);
// backpressure results items skipped
ts
.requestMore(1)
.assertResult(1024);
}
@Test
public void mainError() {
Flowable.error(new TestException())
.switchMapMaybe(Functions.justFunction(Maybe.never()))
.test()
.assertFailure(TestException.class);
}
@Test
public void innerError() {
Flowable.just(1)
.switchMapMaybe(Functions.justFunction(Maybe.error(new TestException())))
.test()
.assertFailure(TestException.class);
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeFlowable(new Function<Flowable<Object>, Publisher<Object>>() {
@Override
public Publisher<Object> apply(Flowable<Object> f)
throws Exception {
return f
.switchMapMaybe(Functions.justFunction(Maybe.never()));
}
}
);
}
@Test
public void limit() {
Flowable.range(1, 5)
.switchMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
return Maybe.just(v);
}
})
.take(3)
.test()
.assertResult(1, 2, 3);
}
@Test
public void switchOver() {
PublishProcessor<Integer> pp = PublishProcessor.create();
final MaybeSubject<Integer> ms1 = MaybeSubject.create();
final MaybeSubject<Integer> ms2 = MaybeSubject.create();
TestSubscriber<Integer> ts = pp.switchMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
if (v == 1) {
return ms1;
}
return ms2;
}
}).test();
ts.assertEmpty();
pp.onNext(1);
ts.assertEmpty();
assertTrue(ms1.hasObservers());
pp.onNext(2);
assertFalse(ms1.hasObservers());
assertTrue(ms2.hasObservers());
ms2.onError(new TestException());
assertFalse(pp.hasSubscribers());
ts.assertFailure(TestException.class);
}
@Test
public void switchOverDelayError() {
PublishProcessor<Integer> pp = PublishProcessor.create();
final MaybeSubject<Integer> ms1 = MaybeSubject.create();
final MaybeSubject<Integer> ms2 = MaybeSubject.create();
TestSubscriber<Integer> ts = pp.switchMapMaybeDelayError(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
if (v == 1) {
return ms1;
}
return ms2;
}
}).test();
ts.assertEmpty();
pp.onNext(1);
ts.assertEmpty();
assertTrue(ms1.hasObservers());
pp.onNext(2);
assertFalse(ms1.hasObservers());
assertTrue(ms2.hasObservers());
ms2.onError(new TestException());
ts.assertEmpty();
assertTrue(pp.hasSubscribers());
pp.onComplete();
ts.assertFailure(TestException.class);
}
@Test
public void mainErrorInnerCompleteDelayError() {
PublishProcessor<Integer> pp = PublishProcessor.create();
final MaybeSubject<Integer> ms = MaybeSubject.create();
TestSubscriber<Integer> ts = pp.switchMapMaybeDelayError(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
return ms;
}
}).test();
ts.assertEmpty();
pp.onNext(1);
ts.assertEmpty();
assertTrue(ms.hasObservers());
pp.onError(new TestException());
assertTrue(ms.hasObservers());
ts.assertEmpty();
ms.onComplete();
ts.assertFailure(TestException.class);
}
@Test
public void mainErrorInnerSuccessDelayError() {
PublishProcessor<Integer> pp = PublishProcessor.create();
final MaybeSubject<Integer> ms = MaybeSubject.create();
TestSubscriber<Integer> ts = pp.switchMapMaybeDelayError(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
return ms;
}
}).test();
ts.assertEmpty();
pp.onNext(1);
ts.assertEmpty();
assertTrue(ms.hasObservers());
pp.onError(new TestException());
assertTrue(ms.hasObservers());
ts.assertEmpty();
ms.onSuccess(1);
ts.assertFailure(TestException.class, 1);
}
@Test
public void mapperCrash() {
Flowable.just(1)
.switchMapMaybe(new Function<Integer, MaybeSource<? extends Object>>() {
@Override
public MaybeSource<? extends Object> apply(Integer v)
throws Exception {
throw new TestException();
}
})
.test()
.assertFailure(TestException.class);
}
@Test
public void disposeBeforeSwitchInOnNext() {
final TestSubscriber<Integer> ts = new TestSubscriber<>();
Flowable.just(1)
.switchMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
ts.cancel();
return Maybe.just(1);
}
}).subscribe(ts);
ts.assertEmpty();
}
@Test
public void disposeOnNextAfterFirst() {
final TestSubscriber<Integer> ts = new TestSubscriber<>();
Flowable.just(1, 2)
.switchMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
if (v == 2) {
ts.cancel();
}
return Maybe.just(1);
}
}).subscribe(ts);
ts.assertValue(1)
.assertNoErrors()
.assertNotComplete();
}
@Test
public void cancel() {
PublishProcessor<Integer> pp = PublishProcessor.create();
final MaybeSubject<Integer> ms = MaybeSubject.create();
TestSubscriber<Integer> ts = pp.switchMapMaybeDelayError(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
return ms;
}
}).test();
ts.assertEmpty();
pp.onNext(1);
ts.assertEmpty();
assertTrue(pp.hasSubscribers());
assertTrue(ms.hasObservers());
ts.cancel();
assertFalse(pp.hasSubscribers());
assertFalse(ms.hasObservers());
}
@Test
public void mainErrorAfterTermination() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new Flowable<Integer>() {
@Override
protected void subscribeActual(Subscriber<? super Integer> s) {
s.onSubscribe(new BooleanSubscription());
s.onNext(1);
s.onError(new TestException("outer"));
}
}
.switchMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
return Maybe.error(new TestException("inner"));
}
})
.to(TestHelper.<Integer>testConsumer())
.assertFailureAndMessage(TestException.class, "inner");
TestHelper.assertUndeliverable(errors, 0, TestException.class, "outer");
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void innerErrorAfterTermination() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
final AtomicReference<MaybeObserver<? super Integer>> moRef = new AtomicReference<>();
TestSubscriberEx<Integer> ts = new Flowable<Integer>() {
@Override
protected void subscribeActual(Subscriber<? super Integer> s) {
s.onSubscribe(new BooleanSubscription());
s.onNext(1);
s.onError(new TestException("outer"));
}
}
.switchMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
return new Maybe<Integer>() {
@Override
protected void subscribeActual(
MaybeObserver<? super Integer> observer) {
observer.onSubscribe(Disposable.empty());
moRef.set(observer);
}
};
}
})
.to(TestHelper.<Integer>testConsumer());
ts.assertFailureAndMessage(TestException.class, "outer");
moRef.get().onError(new TestException("inner"));
moRef.get().onComplete();
TestHelper.assertUndeliverable(errors, 0, TestException.class, "inner");
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void nextCancelRace() {
for (int i = 0; i < TestHelper.RACE_LONG_LOOPS; i++) {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final MaybeSubject<Integer> ms = MaybeSubject.create();
final TestSubscriber<Integer> ts = pp.switchMapMaybeDelayError(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
return ms;
}
}).test();
Runnable r1 = new Runnable() {
@Override
public void run() {
pp.onNext(1);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ts.cancel();
}
};
TestHelper.race(r1, r2);
ts.assertNoErrors()
.assertNotComplete();
}
}
@Test
public void nextInnerErrorRace() {
final TestException ex = new TestException();
for (int i = 0; i < TestHelper.RACE_LONG_LOOPS; i++) {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final MaybeSubject<Integer> ms = MaybeSubject.create();
final TestSubscriberEx<Integer> ts = pp.switchMapMaybeDelayError(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
if (v == 1) {
return ms;
}
return Maybe.never();
}
}).to(TestHelper.<Integer>testConsumer());
pp.onNext(1);
Runnable r1 = new Runnable() {
@Override
public void run() {
pp.onNext(2);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ms.onError(ex);
}
};
TestHelper.race(r1, r2);
if (ts.errors().size() != 0) {
assertTrue(errors.isEmpty());
ts.assertFailure(TestException.class);
} else if (!errors.isEmpty()) {
TestHelper.assertUndeliverable(errors, 0, TestException.class);
}
} finally {
RxJavaPlugins.reset();
}
}
}
@Test
public void mainErrorInnerErrorRace() {
final TestException ex = new TestException();
final TestException ex2 = new TestException();
for (int i = 0; i < TestHelper.RACE_LONG_LOOPS; i++) {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final MaybeSubject<Integer> ms = MaybeSubject.create();
final TestSubscriber<Integer> ts = pp.switchMapMaybeDelayError(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
if (v == 1) {
return ms;
}
return Maybe.never();
}
}).test();
pp.onNext(1);
Runnable r1 = new Runnable() {
@Override
public void run() {
pp.onError(ex);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ms.onError(ex2);
}
};
TestHelper.race(r1, r2);
ts.assertError(new Predicate<Throwable>() {
@Override
public boolean test(Throwable e) throws Exception {
return e instanceof TestException || e instanceof CompositeException;
}
});
if (!errors.isEmpty()) {
TestHelper.assertUndeliverable(errors, 0, TestException.class);
}
} finally {
RxJavaPlugins.reset();
}
}
}
@Test
public void nextInnerSuccessRace() {
for (int i = 0; i < TestHelper.RACE_LONG_LOOPS; i++) {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final MaybeSubject<Integer> ms = MaybeSubject.create();
final TestSubscriber<Integer> ts = pp.switchMapMaybeDelayError(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v)
throws Exception {
if (v == 1) {
return ms;
}
return Maybe.empty();
}
}).test();
pp.onNext(1);
Runnable r1 = new Runnable() {
@Override
public void run() {
pp.onNext(2);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ms.onSuccess(3);
}
};
TestHelper.race(r1, r2);
ts.assertNoErrors()
.assertNotComplete();
}
}
@Test
public void requestMoreOnNext() {
TestSubscriber<Integer> ts = new TestSubscriber<Integer>(1) {
@Override
public void onNext(Integer t) {
super.onNext(t);
requestMore(1);
}
};
Flowable.range(1, 5)
.switchMapMaybe(Functions.justFunction(Maybe.just(1)))
.subscribe(ts);
ts.assertResult(1, 1, 1, 1, 1);
}
@Test
public void undeliverableUponCancel() {
TestHelper.checkUndeliverableUponCancel(new FlowableConverter<Integer, Flowable<Integer>>() {
@Override
public Flowable<Integer> apply(Flowable<Integer> upstream) {
return upstream.switchMapMaybe(new Function<Integer, Maybe<Integer>>() {
@Override
public Maybe<Integer> apply(Integer v) throws Throwable {
return Maybe.just(v).hide();
}
});
}
});
}
@Test
public void undeliverableUponCancelDelayError() {
TestHelper.checkUndeliverableUponCancel(new FlowableConverter<Integer, Flowable<Integer>>() {
@Override
public Flowable<Integer> apply(Flowable<Integer> upstream) {
return upstream.switchMapMaybeDelayError(new Function<Integer, Maybe<Integer>>() {
@Override
public Maybe<Integer> apply(Integer v) throws Throwable {
return Maybe.just(v).hide();
}
});
}
});
}
}
| FlowableSwitchMapMaybeTest |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/config/AbstractInterfaceConfigTest.java | {
"start": 9452,
"end": 9636
} | class ____ extends AbstractInterfaceConfig {
@Override
protected boolean isNeedCheckMethod() {
return false;
}
}
public static | InterfaceConfig2 |
java | apache__hadoop | hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java | {
"start": 1207,
"end": 1800
} | class ____ extends AbstractContractConcatTest {
@Override
protected AbstractFSContract createContract(Configuration configuration) {
return new AdlStorageContract(configuration);
}
@Test
public void testConcatMissingTarget() throws Throwable {
Path testPath = path("test");
Path zeroByteFile = new Path(testPath, "zero.txt");
Path target = new Path(testPath, "target");
touch(getFileSystem(), zeroByteFile);
// Concat on missing target is allowed on Adls file system.
getFileSystem().concat(target, new Path[] {zeroByteFile});
}
} | TestAdlContractConcatLive |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/CollectionBinderTests.java | {
"start": 21375,
"end": 21724
} | class ____ {
private MyCustomNoDefaultConstructorList items = new MyCustomNoDefaultConstructorList(
Collections.singletonList("foo"));
MyCustomNoDefaultConstructorList getItems() {
return this.items;
}
void setItems(MyCustomNoDefaultConstructorList items) {
this.items = items;
}
}
static | ExampleCustomNoDefaultConstructorBean |
java | apache__spark | common/unsafe/src/main/java/org/apache/spark/sql/catalyst/util/CollationNames.java | {
"start": 854,
"end": 1099
} | class ____ {
public static final String UTF8_BINARY = "UTF8_BINARY";
public static final String UTF8_LCASE = "UTF8_LCASE";
public static final String UNICODE = "UNICODE";
public static final String UNICODE_CI = "UNICODE_CI";
}
| CollationNames |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReferenceCountMap.java | {
"start": 1503,
"end": 3167
} | class ____<E extends ReferenceCountMap.ReferenceCounter> {
private Map<E, E> referenceMap = new ConcurrentHashMap<>();
/**
* Add the reference. If the instance already present, just increase the
* reference count.
*
* @param key Key to put in reference map
* @return Referenced instance
*/
public E put(E key) {
E value = referenceMap.putIfAbsent(key, key);
if (value == null) {
value = key;
}
value.incrementAndGetRefCount();
return value;
}
/**
* Delete the reference. Decrease the reference count for the instance, if
* any. On all references removal delete the instance from the map.
*
* @param key Key to remove the reference.
*/
public void remove(E key) {
E value = referenceMap.get(key);
if (value != null && value.decrementAndGetRefCount() == 0) {
referenceMap.remove(key);
}
}
/**
* Get entries in the reference Map.
*
* @return
*/
@VisibleForTesting
public ImmutableList<E> getEntries() {
return new ImmutableList.Builder<E>().addAll(referenceMap.keySet()).build();
}
/**
* Get the reference count for the key
*/
public long getReferenceCount(E key) {
ReferenceCounter counter = referenceMap.get(key);
if (counter != null) {
return counter.getRefCount();
}
return 0;
}
/**
* Get the number of unique elements
*/
public int getUniqueElementsSize() {
return referenceMap.size();
}
/**
* Clear the contents
*/
@VisibleForTesting
public void clear() {
referenceMap.clear();
}
/**
* Interface for the reference count holder
*/
public static | ReferenceCountMap |
java | quarkusio__quarkus | integration-tests/gradle/src/main/resources/quarkus-component-test/src/main/java/org/acme/Foo.java | {
"start": 192,
"end": 373
} | class ____ {
@Inject
Charlie charlie;
@ConfigProperty(name = "bar")
boolean bar;
public String ping() {
return bar ? charlie.ping() : "nok";
}
} | Foo |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/FormTest.java | {
"start": 536,
"end": 964
} | class ____ {
@TestHTTPResource
URI baseUri;
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest();
@Test
void shouldPassUrlEncodedAsForm() {
Client client = QuarkusRestClientBuilder.newBuilder().baseUri(baseUri).build(Client.class);
assertThat(client.echo(new Form().param("name", "World"))).isEqualTo("Hello, World!");
}
@Path("/hello")
public | FormTest |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FtpChangedReadLockIT.java | {
"start": 1359,
"end": 3249
} | class ____ extends FtpServerTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(FtpChangedReadLockIT.class);
@TempDir
Path testDirectory;
protected String getFtpUrl() {
return "ftp://admin@localhost:{{ftp.server.port}}"
+ "/changed?password=admin&readLock=changed&readLockCheckInterval=1000&delete=true";
}
@Test
public void testChangedReadLock() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.expectedFileExists(testDirectory.resolve("out/slowfile.dat"));
writeSlowFile();
MockEndpoint.assertIsSatisfied(context);
String content = context.getTypeConverter().convertTo(String.class, testDirectory.resolve("out/slowfile.dat").toFile());
String[] lines = content.split(LS);
assertEquals(20, lines.length, "There should be 20 lines in the file");
for (int i = 0; i < 20; i++) {
assertEquals("Line " + i, lines[i]);
}
}
private void writeSlowFile() throws Exception {
LOG.debug("Writing slow file...");
createDirectory(service.ftpFile("changed"));
FileOutputStream fos = new FileOutputStream(service.ftpFile("changed/slowfile.dat").toFile(), true);
for (int i = 0; i < 20; i++) {
fos.write(("Line " + i + LS).getBytes());
LOG.debug("Writing line {}", i);
Thread.sleep(200);
}
fos.flush();
fos.close();
LOG.debug("Writing slow file DONE...");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(getFtpUrl()).to(TestSupport.fileUri(testDirectory, "out"), "mock:result");
}
};
}
}
| FtpChangedReadLockIT |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldBeEqualByComparingFieldByFieldRecursively.java | {
"start": 1207,
"end": 5916
} | class ____ extends BasicErrorMessageFactory {
public static ErrorMessageFactory shouldBeEqualByComparingFieldByFieldRecursive(Object actual, Object other,
List<Difference> differences,
Representation representation) {
List<String> descriptionOfDifferences = differences.stream()
.map(difference -> describeDifference(difference, representation))
.collect(toList());
return new ShouldBeEqualByComparingFieldByFieldRecursively("%n" +
"Expecting actual:%n" +
" %s%n" +
"to be equal to:%n" +
" %s%n" +
"when recursively comparing field by field, but found the following difference(s):%n"
+ join(descriptionOfDifferences).with("%n".formatted()),
actual, other);
}
public static ErrorMessageFactory shouldBeEqualByComparingFieldByFieldRecursively(Object actual, Object other,
List<ComparisonDifference> differences,
RecursiveComparisonConfiguration recursiveComparisonConfiguration,
Representation representation) {
String differencesDescription = join(differences.stream()
.map(difference -> difference.multiLineDescription(representation))
.collect(toList())).with("%n%n".formatted());
String recursiveComparisonConfigurationDescription = recursiveComparisonConfiguration.multiLineDescription(representation);
String differencesCount = differences.size() == 1 ? "difference:%n" : "%s differences:%n";
// @format:off
return new ShouldBeEqualByComparingFieldByFieldRecursively("%n" +
"Expecting actual:%n" +
" %s%n" +
"to be equal to:%n" +
" %s%n" +
"when recursively comparing field by field, but found the following " + differencesCount +
"%n" +
escapePercent(differencesDescription) + "%n" +
"%n"+
"The recursive comparison was performed with this configuration:%n" +
recursiveComparisonConfigurationDescription, // don't use %s to avoid AssertJ formatting String with ""
actual, other, differences.size());
// @format:on
}
private ShouldBeEqualByComparingFieldByFieldRecursively(String message, Object... arguments) {
super(message, arguments);
}
private static String describeDifference(Difference difference, Representation representation) {
UnambiguousRepresentation unambiguousRepresentation = new UnambiguousRepresentation(representation, difference.getActual(),
difference.getOther());
String additionalInfo = difference.getDescription()
.map(desc -> "%n- reason : %s".formatted(escapePercent(desc)))
.orElse("");
return format("%nPath to difference: <%s>%n" +
"- actual : %s%n" +
"- expected: %s" + additionalInfo,
join(difference.getPath()).with("."),
escapePercent(unambiguousRepresentation.getActual()),
escapePercent(unambiguousRepresentation.getExpected()));
}
}
| ShouldBeEqualByComparingFieldByFieldRecursively |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/MySentEventNotifier.java | {
"start": 1052,
"end": 1403
} | class ____ extends EventNotifierSupport {
private final List<CamelEvent> events = new ArrayList<>();
public List<CamelEvent> getEvents() {
return events;
}
@Override
public void notify(CamelEvent event) {
if (event instanceof ExchangeSentEvent) {
events.add(event);
}
}
}
| MySentEventNotifier |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/message/FlowMessageFactory.java | {
"start": 964,
"end": 2920
} | interface ____ {
/**
* Creates a new entry message based on a format string with parameters.
*
* @param message format string
* @param params parameters
* @return the new entry message
* @since 2.20.0
*/
EntryMessage newEntryMessage(String message, Object... params);
/**
* Creates a new entry message based on an existing message.
*
* @param message the original message
* @return the new entry message
*/
EntryMessage newEntryMessage(Message message);
/**
* Creates a new exit message based on a return value and a forma string.
*
* @param format a format string
* @param result the return value
* @return the new exit message
* @since 2.20.0
*/
ExitMessage newExitMessage(String format, Object result);
/**
* Creates a new exit message based on no return value and an existing message.
*
* @param message the original entry message
* @return the new exit message
* @since 2.20.0
*/
ExitMessage newExitMessage(Message message);
/**
* Creates a new exit message based on a return value and an existing message.
*
* @param result the return value.
* @param message the original message
* @return the new exit message
*/
ExitMessage newExitMessage(Object result, Message message);
/**
* Creates a new exit message based on no return value and an existing entry message.
*
* @param message the original entry message
* @return the new exit message
*/
ExitMessage newExitMessage(EntryMessage message);
/**
* Creates a new exit message based on a return value and an existing entry message.
*
* @param result the return value.
* @param message the original entry message
* @return the new exit message
*/
ExitMessage newExitMessage(Object result, EntryMessage message);
}
| FlowMessageFactory |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng2921ActiveAttachedArtifactsTest.java | {
"start": 1377,
"end": 1625
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Verify that attached project artifacts can be resolved from the reactor as active project artifacts for
* consumption on other module's | MavenITmng2921ActiveAttachedArtifactsTest |
java | google__guava | android/guava-tests/test/com/google/common/eventbus/AsyncEventBusTest.java | {
"start": 2165,
"end": 2409
} | class ____ implements Executor {
List<Runnable> tasks = new ArrayList<>();
@Override
public void execute(Runnable task) {
tasks.add(task);
}
public List<Runnable> getTasks() {
return tasks;
}
}
}
| FakeExecutor |
java | quarkusio__quarkus | core/processor/src/main/java/io/quarkus/annotation/processor/documentation/config/ConfigDocExtensionProcessor.java | {
"start": 1203,
"end": 5470
} | class ____ implements ExtensionProcessor {
private Config config;
private Utils utils;
private ConfigAnnotationScanner configAnnotationScanner;
@Override
public void init(Config config, Utils utils) {
this.config = config;
this.utils = utils;
this.configAnnotationScanner = new ConfigAnnotationScanner(config, utils);
}
@Override
public void process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
Optional<TypeElement> configGroup = findAnnotation(annotations, Types.ANNOTATION_CONFIG_GROUP);
Optional<TypeElement> configRoot = findAnnotation(annotations, Types.ANNOTATION_CONFIG_ROOT);
Optional<TypeElement> configMapping = findAnnotation(annotations, Types.ANNOTATION_CONFIG_MAPPING);
// make sure we scan the groups before the root
if (configGroup.isPresent()) {
configAnnotationScanner.scanConfigGroups(roundEnv, configGroup.get());
}
if (configRoot.isPresent()) {
configAnnotationScanner.scanConfigRoots(roundEnv, configRoot.get());
}
if (configMapping.isPresent()) {
configAnnotationScanner.scanConfigMappingsWithoutConfigRoot(roundEnv, configMapping.get());
}
}
private Optional<TypeElement> findAnnotation(Set<? extends TypeElement> annotations, String annotationName) {
for (TypeElement annotation : annotations) {
if (annotationName.equals(annotation.getQualifiedName().toString())) {
return Optional.of(annotation);
}
}
return Optional.empty();
}
@Override
public void finalizeProcessing() {
ConfigCollector configCollector = configAnnotationScanner.finalizeProcessing();
// TODO radcortez drop this once we don't need them anymore
// we will still need to read the quarkus-javadoc.properties in the Dev UI for now to allow for extensions built with older Quarkus versions
Properties javadocProperties = new Properties();
for (Entry<String, JavadocElement> javadocElementEntry : configCollector.getJavadocElements().entrySet()) {
if (javadocElementEntry.getValue().description() == null
|| javadocElementEntry.getValue().description().isBlank()) {
continue;
}
javadocProperties.put(javadocElementEntry.getKey(), javadocElementEntry.getValue().description());
}
utils.filer().writeProperties(Outputs.META_INF_QUARKUS_JAVADOC, javadocProperties);
ConfigResolver configResolver = new ConfigResolver(config, utils, configCollector);
// the model is not written in the jar file
JavadocElements javadocElements = configResolver.resolveJavadoc();
if (!javadocElements.isEmpty()) {
utils.filer().writeModel(Outputs.QUARKUS_CONFIG_DOC_JAVADOC, javadocElements);
}
ResolvedModel resolvedModel = configResolver.resolveModel();
if (!resolvedModel.isEmpty()) {
Path resolvedModelPath = utils.filer().writeModel(Outputs.QUARKUS_CONFIG_DOC_MODEL, resolvedModel);
if (config.isDebug()) {
try {
utils.processingEnv().getMessager().printMessage(Kind.NOTE,
"Resolved model:\n\n" + Files.readString(resolvedModelPath));
} catch (IOException e) {
throw new IllegalStateException("Unable to read the resolved model from: " + resolvedModelPath, e);
}
}
}
// Generate files that will be stored in the jar and can be consumed freely.
// We generate JSON files to avoid requiring an additional dependency to the YAML mapper
if (!javadocElements.isEmpty()) {
utils.filer().writeJson(Outputs.META_INF_QUARKUS_CONFIG_JAVADOC_JSON, javadocElements);
}
if (!resolvedModel.isEmpty()) {
utils.filer().writeJson(Outputs.META_INF_QUARKUS_CONFIG_MODEL_JSON, resolvedModel);
}
if (!javadocElements.isEmpty() || !resolvedModel.isEmpty()) {
utils.filer().write(Outputs.META_INF_QUARKUS_CONFIG_MODEL_VERSION, "1");
}
}
}
| ConfigDocExtensionProcessor |
java | google__guava | android/guava-tests/test/com/google/common/collect/ImmutableSortedSetTest.java | {
"start": 44338,
"end": 44884
} | class ____ implements Comparable<SelfComparableExample> {
@Override
public int compareTo(SelfComparableExample o) {
return 0;
}
}
public void testBuilderGenerics_selfComparable() {
// testing simple creation
ImmutableSortedSet.Builder<SelfComparableExample> natural = ImmutableSortedSet.naturalOrder();
assertThat(natural).isNotNull();
ImmutableSortedSet.Builder<SelfComparableExample> reverse = ImmutableSortedSet.reverseOrder();
assertThat(reverse).isNotNull();
}
private static | SelfComparableExample |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigUtils.java | {
"start": 1438,
"end": 8124
} | class ____ {
private DatafeedConfigUtils() {}
/**
* Find the (date) histogram in {@code aggFactory} and extract its interval.
* Throws if there is no (date) histogram or if the histogram has sibling
* aggregations.
* @param aggFactory Aggregations factory
* @return The histogram interval
*/
public static long getHistogramIntervalMillis(AggregatorFactories.Builder aggFactory) {
AggregationBuilder histogram = getHistogramAggregation(aggFactory.getAggregatorFactories());
return getHistogramIntervalMillis(histogram);
}
/**
* Find and return (date) histogram in {@code aggregations}
* @param aggregations List of aggregations
* @return A {@link HistogramAggregationBuilder} or a {@link DateHistogramAggregationBuilder}
*/
public static AggregationBuilder getHistogramAggregation(Collection<AggregationBuilder> aggregations) {
if (aggregations.isEmpty()) {
throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.DATAFEED_AGGREGATIONS_REQUIRES_DATE_HISTOGRAM));
}
if (aggregations.size() != 1) {
throw ExceptionsHelper.badRequestException(Messages.DATAFEED_AGGREGATIONS_REQUIRES_DATE_HISTOGRAM_NO_SIBLINGS);
}
AggregationBuilder agg = aggregations.iterator().next();
if (isHistogram(agg)) {
return agg;
} else {
return getHistogramAggregation(agg.getSubAggregations());
}
}
public static boolean isHistogram(AggregationBuilder aggregationBuilder) {
return aggregationBuilder instanceof HistogramAggregationBuilder
|| aggregationBuilder instanceof DateHistogramAggregationBuilder
|| isCompositeWithDateHistogramSource(aggregationBuilder);
}
public static boolean isCompositeWithDateHistogramSource(AggregationBuilder aggregationBuilder) {
return aggregationBuilder instanceof CompositeAggregationBuilder
&& ((CompositeAggregationBuilder) aggregationBuilder).sources()
.stream()
.anyMatch(DateHistogramValuesSourceBuilder.class::isInstance);
}
public static DateHistogramValuesSourceBuilder getDateHistogramValuesSource(CompositeAggregationBuilder compositeAggregationBuilder) {
for (CompositeValuesSourceBuilder<?> valuesSourceBuilder : compositeAggregationBuilder.sources()) {
if (valuesSourceBuilder instanceof DateHistogramValuesSourceBuilder dateHistogramValuesSourceBuilder) {
return dateHistogramValuesSourceBuilder;
}
}
throw ExceptionsHelper.badRequestException("[composite] aggregations require exactly one [date_histogram] value source");
}
/**
* Get the interval from {@code histogramAggregation} or throw an {@code IllegalStateException}
* if {@code histogramAggregation} is not a {@link HistogramAggregationBuilder} or a
* {@link DateHistogramAggregationBuilder}
*
* @param histogramAggregation Must be a {@link HistogramAggregationBuilder} or a
* {@link DateHistogramAggregationBuilder}
* @return The histogram interval
*/
public static long getHistogramIntervalMillis(AggregationBuilder histogramAggregation) {
if (histogramAggregation instanceof HistogramAggregationBuilder histo) {
return (long) histo.interval();
} else if (histogramAggregation instanceof DateHistogramAggregationBuilder dateHisto) {
return validateAndGetDateHistogramInterval(DateHistogramAggOrValueSource.fromAgg(dateHisto));
} else if (histogramAggregation instanceof CompositeAggregationBuilder composite) {
return validateAndGetDateHistogramInterval(DateHistogramAggOrValueSource.fromCompositeAgg(composite));
} else {
throw new IllegalStateException("Invalid histogram aggregation [" + histogramAggregation.getName() + "]");
}
}
/**
* Returns the date histogram interval as epoch millis if valid, or throws
* an {@link ElasticsearchException} with the validation error
*/
private static long validateAndGetDateHistogramInterval(DateHistogramAggOrValueSource dateHistogram) {
if (dateHistogram.timeZone() != null && dateHistogram.timeZone().normalized().equals(ZoneOffset.UTC) == false) {
throw ExceptionsHelper.badRequestException("ML requires date_histogram.time_zone to be UTC");
}
// TODO retains `dateHistogramInterval()`/`interval()` access for bwc logic, needs updating
if (dateHistogram.getCalendarInterval() != null) {
return validateAndGetCalendarInterval(dateHistogram.getCalendarInterval().toString());
} else if (dateHistogram.getFixedInterval() != null) {
return dateHistogram.getFixedInterval().estimateMillis();
} else {
throw new IllegalArgumentException("Must specify an interval for date_histogram");
}
}
public static long validateAndGetCalendarInterval(String calendarInterval) {
TimeValue interval;
Rounding.DateTimeUnit dateTimeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(calendarInterval);
if (dateTimeUnit != null) {
interval = switch (dateTimeUnit) {
case WEEK_OF_WEEKYEAR -> new TimeValue(7, TimeUnit.DAYS);
case DAY_OF_MONTH -> new TimeValue(1, TimeUnit.DAYS);
case HOUR_OF_DAY -> new TimeValue(1, TimeUnit.HOURS);
case MINUTE_OF_HOUR -> new TimeValue(1, TimeUnit.MINUTES);
case SECOND_OF_MINUTE -> new TimeValue(1, TimeUnit.SECONDS);
case MONTH_OF_YEAR, YEAR_OF_CENTURY, QUARTER_OF_YEAR, YEARS_OF_CENTURY, MONTHS_OF_YEAR -> throw ExceptionsHelper
.badRequestException(invalidDateHistogramCalendarIntervalMessage(calendarInterval));
};
} else {
interval = TimeValue.parseTimeValue(calendarInterval, "date_histogram.calendar_interval");
}
if (interval.days() > 7) {
throw ExceptionsHelper.badRequestException(invalidDateHistogramCalendarIntervalMessage(calendarInterval));
}
return interval.millis();
}
private static String invalidDateHistogramCalendarIntervalMessage(String interval) {
throw ExceptionsHelper.badRequestException(
"When specifying a date_histogram calendar interval ["
+ interval
+ "], ML does not accept intervals longer than a week because of "
+ "variable lengths of periods greater than a week"
);
}
private static | DatafeedConfigUtils |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPlugin.java | {
"start": 1133,
"end": 3050
} | class ____ implements Plugin<Project> {
public static final String SOURCE_SET_NAME = "yamlRestTest";
@Override
public void apply(Project project) {
project.getPluginManager().apply(LegacyRestTestBasePlugin.class);
project.getPluginManager().apply(RestResourcesPlugin.class);
// create source set
SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
SourceSet yamlTestSourceSet = sourceSets.create(SOURCE_SET_NAME);
registerTestTask(project, yamlTestSourceSet);
// setup the dependencies
setupYamlRestTestDependenciesDefaults(project, yamlTestSourceSet);
// setup the copy for the rest resources
project.getTasks().withType(CopyRestApiTask.class).configureEach(copyRestApiTask -> {
copyRestApiTask.setSourceResourceDir(
yamlTestSourceSet.getResources()
.getSrcDirs()
.stream()
.filter(f -> f.isDirectory() && f.getName().equals("resources"))
.findFirst()
.orElse(null)
);
});
// Register rest resources with source set
yamlTestSourceSet.getOutput()
.dir(
project.getTasks()
.withType(CopyRestApiTask.class)
.named(RestResourcesPlugin.COPY_REST_API_SPECS_TASK)
.flatMap(CopyRestApiTask::getOutputResourceDir)
);
yamlTestSourceSet.getOutput()
.dir(
project.getTasks()
.withType(CopyRestTestsTask.class)
.named(RestResourcesPlugin.COPY_YAML_TESTS_TASK)
.flatMap(CopyRestTestsTask::getOutputResourceDir)
);
GradleUtils.setupIdeForTestSourceSet(project, yamlTestSourceSet);
}
}
| LegacyYamlRestTestPlugin |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/RecipientListParallelAggregateThreadPoolIssueTest.java | {
"start": 1086,
"end": 2880
} | class ____ extends ContextTestSupport {
@Test
public void testRecipientListParallelALot() throws Exception {
String before = context.getExecutorServiceManager().resolveThreadName("foo");
for (int i = 0; i < 10; i++) {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.reset();
mock.expectedBodiesReceivedInAnyOrder("c", "b", "a");
template.sendBodyAndHeader("direct:start", "Hello World", "foo", "direct:a,direct:b,direct:c");
assertMockEndpointsSatisfied();
}
String after = context.getExecutorServiceManager().resolveThreadName("foo");
int num1 = context.getTypeConverter().convertTo(int.class, before);
int num2 = context.getTypeConverter().convertTo(int.class, after);
int diff = num2 - num1;
// should be at least 10 + 1 other threads (10 in parallel pool + 1 in
// aggregate pool)
// we run unit test per jmv fork, so there may be a hanging thread
assertTrue(diff >= 11, "There should be 12 or more threads in use, was: " + diff);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
context.getExecutorServiceManager().setThreadNamePattern("#counter#");
from("direct:start").recipientList(header("foo")).parallelProcessing();
from("direct:a").to("log:a").transform(constant("a")).to("mock:result");
from("direct:b").to("log:b").transform(constant("b")).to("mock:result");
from("direct:c").to("log:c").transform(constant("c")).to("mock:result");
}
};
}
}
| RecipientListParallelAggregateThreadPoolIssueTest |
java | spring-projects__spring-framework | spring-messaging/src/main/java/org/springframework/messaging/MessageHandler.java | {
"start": 846,
"end": 1105
} | interface ____ {
/**
* Handle the given message.
* @param message the message to be handled
* @throws MessagingException if the handler failed to process the message
*/
void handleMessage(Message<?> message) throws MessagingException;
}
| MessageHandler |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/web/server/RequestCacheTests.java | {
"start": 1951,
"end": 4992
} | class ____ {
private ServerHttpSecurity http = ServerHttpSecurityConfigurationBuilder.httpWithDefaultAuthentication();
@Test
public void defaultFormLoginRequestCache() {
// @formatter:off
SecurityWebFilterChain securityWebFilter = this.http
.authorizeExchange((authorize) -> authorize
.anyExchange().authenticated())
.formLogin(withDefaults())
.build();
WebTestClient webTestClient = WebTestClient
.bindToController(new SecuredPageController(), new WebTestClientBuilder.Http200RestController())
.webFilter(new WebFilterChainProxy(securityWebFilter))
.build();
WebDriver driver = WebTestClientHtmlUnitDriverBuilder
.webTestClientSetup(webTestClient)
.build();
// @formatter:on
DefaultLoginPage loginPage = SecuredPage.to(driver, DefaultLoginPage.class).assertAt();
// @formatter:off
SecuredPage securedPage = loginPage.loginForm()
.username("user")
.password("password")
.submit(SecuredPage.class);
// @formatter:on
securedPage.assertAt();
}
@Test
public void requestCacheNoOp() {
// @formatter:off
SecurityWebFilterChain securityWebFilter = this.http
.authorizeExchange((authorize) -> authorize
.anyExchange().authenticated())
.formLogin(withDefaults())
.requestCache((cache) -> cache
.requestCache(NoOpServerRequestCache.getInstance()))
.build();
WebTestClient webTestClient = WebTestClient
.bindToController(new SecuredPageController(), new WebTestClientBuilder.Http200RestController())
.webFilter(new WebFilterChainProxy(securityWebFilter))
.build();
WebDriver driver = WebTestClientHtmlUnitDriverBuilder
.webTestClientSetup(webTestClient)
.build();
// @formatter:on
DefaultLoginPage loginPage = SecuredPage.to(driver, DefaultLoginPage.class).assertAt();
// @formatter:off
HomePage securedPage = loginPage.loginForm()
.username("user")
.password("password")
.submit(HomePage.class);
// @formatter:on
securedPage.assertAt();
}
@Test
public void requestWhenCustomRequestCacheInLambdaThenCustomCacheUsed() {
// @formatter:off
SecurityWebFilterChain securityWebFilter = this.http
.authorizeExchange((authorize) -> authorize
.anyExchange().authenticated()
)
.formLogin(withDefaults())
.requestCache((requestCache) -> requestCache
.requestCache(NoOpServerRequestCache.getInstance())
)
.build();
WebTestClient webTestClient = WebTestClient
.bindToController(new SecuredPageController(), new WebTestClientBuilder.Http200RestController())
.webFilter(new WebFilterChainProxy(securityWebFilter))
.build();
WebDriver driver = WebTestClientHtmlUnitDriverBuilder
.webTestClientSetup(webTestClient)
.build();
// @formatter:on
DefaultLoginPage loginPage = SecuredPage.to(driver, DefaultLoginPage.class).assertAt();
// @formatter:off
HomePage securedPage = loginPage.loginForm()
.username("user")
.password("password")
.submit(HomePage.class);
// @formatter:on
securedPage.assertAt();
}
public static | RequestCacheTests |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/FluxBufferTimeoutTest.java | {
"start": 1916,
"end": 16567
} | class ____ {
@AfterEach
public void tearDown() {
VirtualTimeScheduler.reset();
}
Flux<List<Integer>> scenario_bufferWithTimeoutAccumulateOnSize() {
return Flux.range(1, 6)
.delayElements(Duration.ofMillis(300))
.bufferTimeout(5, Duration.ofMillis(2000));
}
@Test
public void bufferWithTimeoutAccumulateOnSize() {
StepVerifier.withVirtualTime(this::scenario_bufferWithTimeoutAccumulateOnSize)
.thenAwait(Duration.ofMillis(1500))
.assertNext(s -> assertThat(s).containsExactly(1, 2, 3, 4, 5))
.thenAwait(Duration.ofMillis(2000))
.assertNext(s -> assertThat(s).containsExactly(6))
.verifyComplete();
}
Flux<List<Integer>> scenario_bufferWithTimeoutAccumulateOnTime() {
return Flux.range(1, 6)
.delayElements(Duration.ofNanos(300)).log("delayed")
.bufferTimeout(15, Duration.ofNanos(1500)).log("buffered");
}
@Test
public void bufferWithTimeoutAccumulateOnTime() {
StepVerifier.withVirtualTime(this::scenario_bufferWithTimeoutAccumulateOnTime)
//.create(this.scenario_bufferWithTimeoutAccumulateOnTime())
.thenAwait(Duration.ofNanos(1800))
.assertNext(s -> assertThat(s).containsExactly(1, 2, 3, 4, 5))
.thenAwait(Duration.ofNanos(2000))
.assertNext(s -> assertThat(s).containsExactly(6))
.verifyComplete();
}
Flux<List<Integer>> scenario_bufferWithTimeoutThrowingExceptionOnTimeOrSizeIfDownstreamDemandIsLow() {
return Flux.range(1, 6)
.delayElements(Duration.ofMillis(300))
.bufferTimeout(5, Duration.ofMillis(100));
}
@Test
public void bufferWithTimeoutThrowingExceptionOnTimeOrSizeIfDownstreamDemandIsLow() {
StepVerifier.withVirtualTime(this::scenario_bufferWithTimeoutThrowingExceptionOnTimeOrSizeIfDownstreamDemandIsLow, 0)
.expectSubscription()
.expectNoEvent(Duration.ofMillis(300))
.thenRequest(1)
.expectNoEvent(Duration.ofMillis(100))
.assertNext(s -> assertThat(s).containsExactly(1))
.expectNoEvent(Duration.ofMillis(300))
.verifyErrorSatisfies(e ->
assertThat(e)
.hasMessage("Could not emit buffer due to lack of requests")
.isInstanceOf(IllegalStateException.class)
);
}
@Test
void bufferWithTimeoutAvoidingNegativeRequests() {
final List<Long> requestPattern = new CopyOnWriteArrayList<>();
StepVerifier.withVirtualTime(() ->
Flux.range(1, 3)
.delayElements(Duration.ofMillis(100))
.doOnRequest(requestPattern::add)
.bufferTimeout(5, Duration.ofMillis(100)),
0)
.expectSubscription()
.expectNoEvent(Duration.ofMillis(100))
.thenRequest(2)
.expectNoEvent(Duration.ofMillis(100))
.assertNext(s -> assertThat(s).containsExactly(1))
.expectNoEvent(Duration.ofMillis(100))
.assertNext(s -> assertThat(s).containsExactly(2))
.thenRequest(1) // This should not cause a negative upstream request
.expectNoEvent(Duration.ofMillis(100))
.thenCancel()
.verify();
assertThat(requestPattern).allSatisfy(r -> assertThat(r).isPositive());
}
@Test
public void scanSubscriber() {
CoreSubscriber<List<String>> actual = new LambdaSubscriber<>(null, e -> {}, null, null);
final Scheduler.Worker worker = Schedulers.boundedElastic()
.createWorker();
FluxBufferTimeout.BufferTimeoutSubscriber<String, List<String>> test = new FluxBufferTimeout.BufferTimeoutSubscriber<String, List<String>>(
actual, 123, 1000, TimeUnit.MILLISECONDS,
worker, ArrayList::new);
try {
Subscription subscription = Operators.emptySubscription();
test.onSubscribe(subscription);
test.requested = 3L;
test.index = 100;
assertThat(test.scan(Scannable.Attr.RUN_ON)).isSameAs(worker);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(subscription);
assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(actual);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(3L);
assertThat(test.scan(Scannable.Attr.CAPACITY)).isEqualTo(123);
assertThat(test.scan(Scannable.Attr.BUFFERED)).isEqualTo(23);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.ASYNC);
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
test.onError(new IllegalStateException("boom"));
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
assertThat(test.scan(Scannable.Attr.TERMINATED)).isTrue();
}
finally {
worker.dispose();
}
}
@Test
public void scanOperator() {
final Flux<List<Integer>> flux = Flux.just(1).bufferTimeout(3, Duration.ofSeconds(1));
assertThat(flux).isInstanceOf(Scannable.class);
assertThat(from(flux).scan(Scannable.Attr.RUN_ON)).isSameAs(Schedulers.parallel());
assertThat(from(flux).scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.ASYNC);
}
@Test
public void shouldShowActualSubscriberDemand() {
Subscription[] subscriptionsHolder = new Subscription[1];
CoreSubscriber<List<String>> actual = new LambdaSubscriber<>(null, e -> {}, null, s -> subscriptionsHolder[0] = s);
FluxBufferTimeout.BufferTimeoutSubscriber<String, List<String>> test = new FluxBufferTimeout.BufferTimeoutSubscriber<String, List<String>>(
actual, 123, 1000, TimeUnit.MILLISECONDS, Schedulers.boundedElastic().createWorker(), ArrayList::new);
Subscription subscription = Operators.emptySubscription();
test.onSubscribe(subscription);
subscriptionsHolder[0].request(10);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(10L);
subscriptionsHolder[0].request(5);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(15L);
}
@Test
public void downstreamDemandShouldBeAbleToDecreaseOnFullBuffer() {
Subscription[] subscriptionsHolder = new Subscription[1];
CoreSubscriber<List<String>> actual = new LambdaSubscriber<>(null, e -> {}, null, s -> subscriptionsHolder[0] = s);
FluxBufferTimeout.BufferTimeoutSubscriber<String, List<String>> test = new FluxBufferTimeout.BufferTimeoutSubscriber<String, List<String>>(
actual, 5, 1000, TimeUnit.MILLISECONDS, Schedulers.boundedElastic().createWorker(), ArrayList::new);
Subscription subscription = Operators.emptySubscription();
test.onSubscribe(subscription);
subscriptionsHolder[0].request(1);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(1L);
for (int i = 0; i < 5; i++) {
test.onNext(String.valueOf(i));
}
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(0L);
}
@Test
public void downstreamDemandShouldBeAbleToDecreaseOnTimeSpan() {
Subscription[] subscriptionsHolder = new Subscription[1];
CoreSubscriber<List<String>> actual = new LambdaSubscriber<>(null, e -> {}, null, s -> subscriptionsHolder[0] = s);
VirtualTimeScheduler timeScheduler = VirtualTimeScheduler.getOrSet();
FluxBufferTimeout.BufferTimeoutSubscriber<String, List<String>> test = new FluxBufferTimeout.BufferTimeoutSubscriber<String, List<String>>(
actual, 5, 100, TimeUnit.MILLISECONDS, timeScheduler.createWorker(), ArrayList::new);
Subscription subscription = Operators.emptySubscription();
test.onSubscribe(subscription);
subscriptionsHolder[0].request(1);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(1L);
timeScheduler.advanceTimeBy(Duration.ofMillis(100));
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(1L);
test.onNext(String.valueOf("0"));
timeScheduler.advanceTimeBy(Duration.ofMillis(100));
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(0L);
}
@Test
public void requestedFromUpstreamShouldNotExceedDownstreamDemand() {
Sinks.Many<String> sink = Sinks.many().multicast().onBackpressureBuffer();
Flux<String> emitter = sink.asFlux();
AtomicLong requestedOutstanding = new AtomicLong(0);
VirtualTimeScheduler scheduler = VirtualTimeScheduler.create();
Flux<List<String>> flux = emitter.doOnRequest(requestedOutstanding::addAndGet)
.bufferTimeout(5, Duration.ofMillis(100), scheduler)
.doOnNext(list -> requestedOutstanding.addAndGet(0 - list.size()));
StepVerifier.withVirtualTime(() -> flux, () -> scheduler, 0)
.expectSubscription()
.then(() -> assertThat(requestedOutstanding).hasValue(0))
.thenRequest(2)
.then(() -> assertThat(requestedOutstanding.get()).isEqualTo(10))
.then(() -> sink.emitNext("a", FAIL_FAST))
.thenAwait(Duration.ofMillis(100))
.assertNext(s -> assertThat(s).containsExactly("a"))
.then(() -> assertThat(requestedOutstanding).hasValue(9))
.thenRequest(1)
.then(() -> assertThat(requestedOutstanding).hasValue(10))
.thenCancel()
.verify();
}
@Test
public void exceedingUpstreamDemandResultsInError() {
Subscription[] subscriptionsHolder = new Subscription[1];
AtomicReference<Throwable> capturedException = new AtomicReference<>();
CoreSubscriber<List<String>> actual = new LambdaSubscriber<>(null, capturedException::set, null, s -> subscriptionsHolder[0] = s);
VirtualTimeScheduler scheduler = VirtualTimeScheduler.create();
FluxBufferTimeout.BufferTimeoutSubscriber<String, List<String>> test = new FluxBufferTimeout.BufferTimeoutSubscriber<String, List<String>>(
actual, 5, 1000, TimeUnit.MILLISECONDS, scheduler.createWorker(), ArrayList::new);
Subscription subscription = Operators.emptySubscription();
test.onSubscribe(subscription);
subscriptionsHolder[0].request(1);
for (int i = 0; i < 5; i++) {
test.onNext(String.valueOf(i));
}
assertThat(capturedException.get()).isNull();
test.onNext(String.valueOf(123));
assertThat(capturedException.get()).isInstanceOf(IllegalStateException.class)
.hasMessage("Unrequested element received");
}
@Test
public void scanSubscriberCancelled() {
CoreSubscriber<List<String>>
actual = new LambdaSubscriber<>(null, e -> {}, null, null);
FluxBufferTimeout.BufferTimeoutSubscriber<String, List<String>> test = new FluxBufferTimeout.BufferTimeoutSubscriber<String, List<String>>(
actual, 123, 1000, TimeUnit.MILLISECONDS, Schedulers.boundedElastic().createWorker(), ArrayList::new);
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
test.cancel();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isTrue();
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
}
@Test
public void flushShouldNotRaceWithNext() {
for (int i = 0; i < 100; i++) {
AtomicInteger caller = new AtomicInteger();
AtomicBoolean stop = new AtomicBoolean();
Set<Integer> seen = new HashSet<>();
Consumer<List<Integer>> consumer = integers -> {
RuntimeException ex = new RuntimeException(integers.toString());
if (caller.getAndIncrement() == 0) {
for (Integer value : integers) {
if (!seen.add(value)) {
throw new IllegalStateException("Duplicate! " + value);
}
}
if (caller.decrementAndGet() != 0) {
stop.set(true);
throw ex;
}
}
else {
stop.set(true);
throw ex;
}
};
CoreSubscriber<List<Integer>> actual =
new LambdaSubscriber<>(consumer, null, null, null);
FluxBufferTimeout.BufferTimeoutSubscriber<Integer, List<Integer>> test =
new FluxBufferTimeout.BufferTimeoutSubscriber<Integer, List<Integer>>(
actual,
3,
1000,
TimeUnit.MILLISECONDS,
Schedulers.boundedElastic()
.createWorker(),
ArrayList::new);
test.onSubscribe(Operators.emptySubscription());
AtomicInteger counter = new AtomicInteger();
for (int j = 0; j < 500; j++) {
RaceTestUtils.race(() -> test.onNext(counter.getAndIncrement()), test.flushTask);
Assertions.assertThat(stop).isFalse();
}
test.onComplete();
assertThat(seen.size()).as(() -> seen.size() + " " + seen.toString())
.isEqualTo(500);
}
}
//see https://github.com/reactor/reactor-core/issues/1247
@Test
public void rejectedOnNextLeadsToOnError() {
Scheduler scheduler = Schedulers.newSingle("rejectedOnNextLeadsToOnError");
scheduler.dispose();
StepVerifier.create(Flux.just(1, 2, 3)
.bufferTimeout(4, Duration.ofMillis(500), scheduler))
.expectError(RejectedExecutionException.class)
.verify(Duration.ofSeconds(1));
}
@Test
public void discardOnCancel() {
StepVerifier.create(Flux.just(1, 2, 3)
.concatWith(Mono.never())
.bufferTimeout(10, Duration.ofMillis(100)))
.thenAwait(Duration.ofMillis(10))
.thenCancel()
.verifyThenAssertThat()
.hasDiscardedExactly(1, 2, 3);
}
@Test
public void discardOnFlushWithoutRequest() {
TestPublisher<Integer> testPublisher = TestPublisher.createNoncompliant(TestPublisher.Violation.REQUEST_OVERFLOW);
StepVerifier.create(testPublisher
.flux()
.bufferTimeout(10, Duration.ofMillis(200)),
StepVerifierOptions.create().initialRequest(0))
.then(() -> testPublisher.emit(1, 2, 3))
.thenAwait(Duration.ofMillis(250))
.expectErrorMatches(Exceptions::isOverflow)
.verifyThenAssertThat()
.hasDiscardedExactly(1, 2, 3);
}
@Test
public void discardOnTimerRejected() {
Scheduler scheduler = Schedulers.newSingle("discardOnTimerRejected");
StepVerifier.create(Flux.just(1, 2, 3)
.doOnNext(n -> scheduler.dispose())
.bufferTimeout(10, Duration.ofMillis(100), scheduler))
.expectErrorSatisfies(e -> assertThat(e).isInstanceOf(RejectedExecutionException.class))
.verifyThenAssertThat()
.hasDiscardedExactly(1);
}
@Test
public void discardOnError() {
StepVerifier.create(Flux.just(1, 2, 3)
.concatWith(Mono.error(new IllegalStateException("boom")))
.bufferTimeout(10, Duration.ofMillis(100)))
.expectErrorMessage("boom")
.verifyThenAssertThat()
.hasDiscardedExactly(1, 2, 3);
}
}
| FluxBufferTimeoutTest |
java | quarkusio__quarkus | test-framework/junit5-config/src/main/java/io/quarkus/test/config/QuarkusClassOrderer.java | {
"start": 477,
"end": 1897
} | class ____ implements ClassOrderer {
private final ClassOrderer delegate;
public QuarkusClassOrderer() {
// Our config will have been initialised onto the classloader that was active when JUnit starts, so read it from there
ClassLoader old = Thread.currentThread().getContextClassLoader();
Thread.currentThread().setContextClassLoader(this.getClass().getClassLoader());
try {
SmallRyeConfig config = ConfigProvider.getConfig()
.unwrap(SmallRyeConfig.class);
TestConfig testConfig = config.getConfigMapping(TestConfig.class);
delegate = testConfig.classOrderer()
.map(klass -> ReflectionUtils.tryToLoadClass(klass)
.andThenTry(ReflectionUtils::newInstance)
.andThenTry(instance -> (ClassOrderer) instance)
.toOptional()
.orElse(EMPTY))
.orElse(EMPTY);
} finally {
Thread.currentThread().setContextClassLoader(old);
}
}
@Override
public void orderClasses(final ClassOrdererContext context) {
delegate.orderClasses(context);
}
private static final ClassOrderer EMPTY = new ClassOrderer() {
@Override
public void orderClasses(final ClassOrdererContext context) {
}
};
}
| QuarkusClassOrderer |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PartitionRequestListenerManager.java | {
"start": 1119,
"end": 2997
} | class ____ {
private final Map<InputChannelID, PartitionRequestListener> listeners;
public PartitionRequestListenerManager() {
this.listeners = new HashMap<>();
}
public Collection<PartitionRequestListener> getPartitionRequestListeners() {
return listeners.values();
}
public void remove(InputChannelID receiverId) {
listeners.remove(receiverId);
}
public boolean isEmpty() {
return listeners.isEmpty();
}
public void registerListener(PartitionRequestListener listener) {
PartitionRequestListener previous = listeners.put(listener.getReceiverId(), listener);
if (previous != null) {
throw new IllegalStateException(
"Partition request listener with receiver "
+ listener.getReceiverId()
+ " has been registered.");
}
}
/**
* Remove the expire partition request listener and add it to the given timeoutListeners.
*
* @param now the timestamp
* @param timeout the timeout mills
* @param timeoutListeners the expire partition request listeners
*/
public void removeExpiration(
long now, long timeout, Collection<PartitionRequestListener> timeoutListeners) {
Iterator<Map.Entry<InputChannelID, PartitionRequestListener>> iterator =
listeners.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<InputChannelID, PartitionRequestListener> entry = iterator.next();
PartitionRequestListener partitionRequestListener = entry.getValue();
if ((now - partitionRequestListener.getCreateTimestamp()) > timeout) {
timeoutListeners.add(partitionRequestListener);
iterator.remove();
}
}
}
}
| PartitionRequestListenerManager |
java | quarkusio__quarkus | integration-tests/oidc-wiremock/src/main/java/io/quarkus/it/keycloak/OidcEventResource.java | {
"start": 271,
"end": 1423
} | class ____ {
private final OidcEventObserver oidcEventObserver;
private final String expectedAuthServerUrl;
public OidcEventResource(OidcEventObserver oidcEventObserver, OidcConfig oidcConfig) {
this.expectedAuthServerUrl = dropTrailingSlash(OidcConfig.getDefaultTenant(oidcConfig).authServerUrl().get());
this.oidcEventObserver = oidcEventObserver;
}
@Path("/unavailable-auth-server-urls")
@GET
public String unavailableAuthServerUrls() {
return oidcEventObserver
.getUnavailableAuthServerUrls()
.stream()
.sorted(String::compareTo)
.collect(Collectors.joining("-"));
}
@Path("/available-auth-server-urls")
@GET
public String availableAuthServerUrls() {
return oidcEventObserver
.getAvailableAuthServerUrls()
.stream()
.sorted(String::compareTo)
.collect(Collectors.joining("-"));
}
@GET
@Path("/expected-auth-server-url")
public String getExpectedAuthServerUrl() {
return expectedAuthServerUrl;
}
}
| OidcEventResource |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/support/JpaPersistableEntityInformation.java | {
"start": 1337,
"end": 2343
} | class ____ {@link Metamodel}.
*
* @param domainClass must not be {@literal null}.
* @param metamodel must not be {@literal null}.
* @param persistenceUnitUtil must not be {@literal null}.
*/
public JpaPersistableEntityInformation(Class<T> domainClass, Metamodel metamodel,
PersistenceUnitUtil persistenceUnitUtil) {
super(domainClass, metamodel, persistenceUnitUtil);
}
/**
* Creates a new {@link JpaPersistableEntityInformation} for the given {@link Metamodel}.
*
* @param entityType must not be {@literal null}.
* @param metamodel must not be {@literal null}.
* @param persistenceUnitUtil must not be {@literal null}.
* @since 4.0
*/
JpaPersistableEntityInformation(EntityType<T> entityType, Metamodel metamodel,
PersistenceUnitUtil persistenceUnitUtil) {
super(entityType, metamodel, persistenceUnitUtil);
}
@Override
public boolean isNew(T entity) {
return entity.isNew();
}
@Override
public @Nullable ID getId(T entity) {
return entity.getId();
}
}
| and |
java | apache__thrift | lib/java/src/main/java/org/apache/thrift/protocol/TCompactProtocol.java | {
"start": 3881,
"end": 30251
} | class ____ {
public static final byte BOOLEAN_TRUE = 0x01;
public static final byte BOOLEAN_FALSE = 0x02;
public static final byte BYTE = 0x03;
public static final byte I16 = 0x04;
public static final byte I32 = 0x05;
public static final byte I64 = 0x06;
public static final byte DOUBLE = 0x07;
public static final byte BINARY = 0x08;
public static final byte LIST = 0x09;
public static final byte SET = 0x0A;
public static final byte MAP = 0x0B;
public static final byte STRUCT = 0x0C;
public static final byte UUID = 0x0D;
}
/**
* Used to keep track of the last field for the current and previous structs, so we can do the
* delta stuff.
*/
private final ShortStack lastField_ = new ShortStack(15);
private short lastFieldId_ = 0;
/**
* If we encounter a boolean field begin, save the TField here so it can have the value
* incorporated.
*/
private TField booleanField_ = null;
/**
* If we read a field header, and it's a boolean field, save the boolean value here so that
* readBool can use it.
*/
private Boolean boolValue_ = null;
/**
* The maximum number of bytes to read from the transport for variable-length fields (such as
* strings or binary) or {@link #NO_LENGTH_LIMIT} for unlimited.
*/
private final long stringLengthLimit_;
/**
* The maximum number of elements to read from the network for containers (maps, sets, lists), or
* {@link #NO_LENGTH_LIMIT} for unlimited.
*/
private final long containerLengthLimit_;
/**
* Temporary buffer used for various operations that would otherwise require a small allocation.
*/
private final byte[] temp = new byte[16];
/**
* Create a TCompactProtocol.
*
* @param transport the TTransport object to read from or write to.
* @param stringLengthLimit the maximum number of bytes to read for variable-length fields.
* @param containerLengthLimit the maximum number of elements to read for containers.
*/
public TCompactProtocol(TTransport transport, long stringLengthLimit, long containerLengthLimit) {
super(transport);
this.stringLengthLimit_ = stringLengthLimit;
this.containerLengthLimit_ = containerLengthLimit;
}
/**
* Create a TCompactProtocol.
*
* @param transport the TTransport object to read from or write to.
* @param stringLengthLimit the maximum number of bytes to read for variable-length fields.
* @deprecated Use constructor specifying both string limit and container limit instead
*/
@Deprecated
public TCompactProtocol(TTransport transport, long stringLengthLimit) {
this(transport, stringLengthLimit, NO_LENGTH_LIMIT);
}
/**
* Create a TCompactProtocol.
*
* @param transport the TTransport object to read from or write to.
*/
public TCompactProtocol(TTransport transport) {
this(transport, NO_LENGTH_LIMIT, NO_LENGTH_LIMIT);
}
@Override
public void reset() {
lastField_.clear();
lastFieldId_ = 0;
}
//
// Public Writing methods.
//
/**
* Write a message header to the wire. Compact Protocol messages contain the protocol version so
* we can migrate forwards in the future if need be.
*/
@Override
public void writeMessageBegin(TMessage message) throws TException {
writeByteDirect(PROTOCOL_ID);
writeByteDirect((VERSION & VERSION_MASK) | ((message.type << TYPE_SHIFT_AMOUNT) & TYPE_MASK));
writeVarint32(message.seqid);
writeString(message.name);
}
/**
* Write a struct begin. This doesn't actually put anything on the wire. We use it as an
* opportunity to put special placeholder markers on the field stack so we can get the field id
* deltas correct.
*/
@Override
public void writeStructBegin(TStruct struct) throws TException {
lastField_.push(lastFieldId_);
lastFieldId_ = 0;
}
/**
* Write a struct end. This doesn't actually put anything on the wire. We use this as an
* opportunity to pop the last field from the current struct off of the field stack.
*/
@Override
public void writeStructEnd() throws TException {
lastFieldId_ = lastField_.pop();
}
/**
* Write a field header containing the field id and field type. If the difference between the
* current field id and the last one is small (< 15), then the field id will be encoded in the
* 4 MSB as a delta. Otherwise, the field id will follow the type header as a zigzag varint.
*/
@Override
public void writeFieldBegin(TField field) throws TException {
if (field.type == TType.BOOL) {
// we want to possibly include the value, so we'll wait.
booleanField_ = field;
} else {
writeFieldBeginInternal(field, (byte) -1);
}
}
/**
* The workhorse of writeFieldBegin. It has the option of doing a 'type override' of the type
* header. This is used specifically in the boolean field case.
*/
private void writeFieldBeginInternal(TField field, byte typeOverride) throws TException {
// short lastField = lastField_.pop();
// if there's a type override, use that.
byte typeToWrite = typeOverride == -1 ? getCompactType(field.type) : typeOverride;
// check if we can use delta encoding for the field id
if (field.id > lastFieldId_ && field.id - lastFieldId_ <= 15) {
// write them together
writeByteDirect((field.id - lastFieldId_) << 4 | typeToWrite);
} else {
// write them separate
writeByteDirect(typeToWrite);
writeI16(field.id);
}
lastFieldId_ = field.id;
// lastField_.push(field.id);
}
/** Write the STOP symbol so we know there are no more fields in this struct. */
@Override
public void writeFieldStop() throws TException {
writeByteDirect(TType.STOP);
}
/**
* Write a map header. If the map is empty, omit the key and value type headers, as we don't need
* any additional information to skip it.
*/
@Override
public void writeMapBegin(TMap map) throws TException {
if (map.size == 0) {
writeByteDirect(0);
} else {
writeVarint32(map.size);
writeByteDirect(getCompactType(map.keyType) << 4 | getCompactType(map.valueType));
}
}
/** Write a list header. */
@Override
public void writeListBegin(TList list) throws TException {
writeCollectionBegin(list.elemType, list.size);
}
/** Write a set header. */
@Override
public void writeSetBegin(TSet set) throws TException {
writeCollectionBegin(set.elemType, set.size);
}
/**
* Write a boolean value. Potentially, this could be a boolean field, in which case the field
* header info isn't written yet. If so, decide what the right type header is for the value and
* then write the field header. Otherwise, write a single byte.
*/
@Override
public void writeBool(boolean b) throws TException {
if (booleanField_ != null) {
// we haven't written the field header yet
writeFieldBeginInternal(booleanField_, b ? Types.BOOLEAN_TRUE : Types.BOOLEAN_FALSE);
booleanField_ = null;
} else {
// we're not part of a field, so just write the value.
writeByteDirect(b ? Types.BOOLEAN_TRUE : Types.BOOLEAN_FALSE);
}
}
/** Write a byte. Nothing to see here! */
@Override
public void writeByte(byte b) throws TException {
writeByteDirect(b);
}
/** Write an I16 as a zigzag varint. */
@Override
public void writeI16(short i16) throws TException {
writeVarint32(intToZigZag(i16));
}
/** Write an i32 as a zigzag varint. */
@Override
public void writeI32(int i32) throws TException {
writeVarint32(intToZigZag(i32));
}
/** Write an i64 as a zigzag varint. */
@Override
public void writeI64(long i64) throws TException {
writeVarint64(longToZigzag(i64));
}
/** Write a double to the wire as 8 bytes. */
@Override
public void writeDouble(double dub) throws TException {
fixedLongToBytes(Double.doubleToLongBits(dub), temp, 0);
trans_.write(temp, 0, 8);
}
@Override
public void writeUuid(UUID uuid) throws TException {
fixedLongToBytes(uuid.getLeastSignificantBits(), temp, 0);
fixedLongToBytes(uuid.getMostSignificantBits(), temp, 8);
trans_.write(temp, 0, 16);
}
/** Write a string to the wire with a varint size preceding. */
@Override
public void writeString(String str) throws TException {
byte[] bytes = str.getBytes(StandardCharsets.UTF_8);
writeVarint32(bytes.length);
trans_.write(bytes, 0, bytes.length);
}
/** Write a byte array, using a varint for the size. */
@Override
public void writeBinary(ByteBuffer bin) throws TException {
ByteBuffer bb = bin.asReadOnlyBuffer();
writeVarint32(bb.remaining());
trans_.write(bb);
}
//
// These methods are called by structs, but don't actually have any wire
// output or purpose.
//
@Override
public void writeMessageEnd() throws TException {}
@Override
public void writeMapEnd() throws TException {}
@Override
public void writeListEnd() throws TException {}
@Override
public void writeSetEnd() throws TException {}
@Override
public void writeFieldEnd() throws TException {}
//
// Internal writing methods
//
/**
* Abstract method for writing the start of lists and sets. List and sets on the wire differ only
* by the type indicator.
*/
protected void writeCollectionBegin(byte elemType, int size) throws TException {
if (size <= 14) {
writeByteDirect(size << 4 | getCompactType(elemType));
} else {
writeByteDirect(0xf0 | getCompactType(elemType));
writeVarint32(size);
}
}
/**
* Write an i32 as a varint. Results in 1-5 bytes on the wire. TODO: make a permanent buffer like
* writeVarint64?
*/
private void writeVarint32(int n) throws TException {
int idx = 0;
while (true) {
if ((n & ~0x7F) == 0) {
temp[idx++] = (byte) n;
// writeByteDirect((byte)n);
break;
// return;
} else {
temp[idx++] = (byte) ((n & 0x7F) | 0x80);
// writeByteDirect((byte)((n & 0x7F) | 0x80));
n >>>= 7;
}
}
trans_.write(temp, 0, idx);
}
/** Write an i64 as a varint. Results in 1-10 bytes on the wire. */
private void writeVarint64(long n) throws TException {
int idx = 0;
while (true) {
if ((n & ~0x7FL) == 0) {
temp[idx++] = (byte) n;
break;
} else {
temp[idx++] = ((byte) ((n & 0x7F) | 0x80));
n >>>= 7;
}
}
trans_.write(temp, 0, idx);
}
/**
* Convert l into a zigzag long. This allows negative numbers to be represented compactly as a
* varint.
*/
private long longToZigzag(long l) {
return (l << 1) ^ (l >> 63);
}
/**
* Convert n into a zigzag int. This allows negative numbers to be represented compactly as a
* varint.
*/
private int intToZigZag(int n) {
return (n << 1) ^ (n >> 31);
}
/** Convert a long into little-endian bytes in buf starting at off and going until off+7. */
private void fixedLongToBytes(long n, byte[] buf, int off) {
buf[off + 0] = (byte) (n & 0xff);
buf[off + 1] = (byte) ((n >> 8) & 0xff);
buf[off + 2] = (byte) ((n >> 16) & 0xff);
buf[off + 3] = (byte) ((n >> 24) & 0xff);
buf[off + 4] = (byte) ((n >> 32) & 0xff);
buf[off + 5] = (byte) ((n >> 40) & 0xff);
buf[off + 6] = (byte) ((n >> 48) & 0xff);
buf[off + 7] = (byte) ((n >> 56) & 0xff);
}
/**
* Writes a byte without any possibility of all that field header nonsense. Used internally by
* other writing methods that know they need to write a byte.
*/
private void writeByteDirect(byte b) throws TException {
temp[0] = b;
trans_.write(temp, 0, 1);
}
/** Writes a byte without any possibility of all that field header nonsense. */
private void writeByteDirect(int n) throws TException {
writeByteDirect((byte) n);
}
//
// Reading methods.
//
/** Read a message header. */
@Override
public TMessage readMessageBegin() throws TException {
byte protocolId = readByte();
if (protocolId != PROTOCOL_ID) {
throw new TProtocolException(
"Expected protocol id "
+ Integer.toHexString(PROTOCOL_ID)
+ " but got "
+ Integer.toHexString(protocolId));
}
byte versionAndType = readByte();
byte version = (byte) (versionAndType & VERSION_MASK);
if (version != VERSION) {
throw new TProtocolException("Expected version " + VERSION + " but got " + version);
}
byte type = (byte) ((versionAndType >> TYPE_SHIFT_AMOUNT) & TYPE_BITS);
int seqid = readVarint32();
String messageName = readString();
return new TMessage(messageName, type, seqid);
}
/**
* Read a struct begin. There's nothing on the wire for this, but it is our opportunity to push a
* new struct begin marker onto the field stack.
*/
@Override
public TStruct readStructBegin() throws TException {
lastField_.push(lastFieldId_);
lastFieldId_ = 0;
return ANONYMOUS_STRUCT;
}
/**
* Doesn't actually consume any wire data, just removes the last field for this struct from the
* field stack.
*/
@Override
public void readStructEnd() throws TException {
// consume the last field we read off the wire.
lastFieldId_ = lastField_.pop();
}
/** Read a field header off the wire. */
@Override
public TField readFieldBegin() throws TException {
byte type = readByte();
// if it's a stop, then we can return immediately, as the struct is over.
if (type == TType.STOP) {
return TSTOP;
}
return new TField("", getTType((byte) (type & 0x0f)), readFieldId(type));
}
/**
* Read a map header off the wire. If the size is zero, skip reading the key and value type. This
* means that 0-length maps will yield TMaps without the "correct" types.
*/
@Override
public TMap readMapBegin() throws TException {
int size = readVarint32();
checkContainerReadLength(size);
byte keyAndValueType = size == 0 ? 0 : readByte();
TMap map =
new TMap(
getTType((byte) (keyAndValueType >> 4)),
getTType((byte) (keyAndValueType & 0xf)),
size);
checkReadBytesAvailable(map);
return map;
}
/**
* Read a list header off the wire. If the list size is 0-14, the size will be packed into the
* element type header. If it's a longer list, the 4 MSB of the element type header will be 0xF,
* and a varint will follow with the true size.
*/
@Override
public TList readListBegin() throws TException {
byte size_and_type = readByte();
int size = (size_and_type >> 4) & 0x0f;
if (size == 15) {
size = readVarint32();
}
checkContainerReadLength(size);
TList list = new TList(getTType(size_and_type), size);
checkReadBytesAvailable(list);
return list;
}
/**
* Read a set header off the wire. If the set size is 0-14, the size will be packed into the
* element type header. If it's a longer set, the 4 MSB of the element type header will be 0xF,
* and a varint will follow with the true size.
*/
@Override
public TSet readSetBegin() throws TException {
return new TSet(readListBegin());
}
/**
* Read a boolean off the wire. If this is a boolean field, the value should already have been
* read during readFieldBegin, so we'll just consume the pre-stored value. Otherwise, read a byte.
*/
@Override
public boolean readBool() throws TException {
if (boolValue_ != null) {
boolean result = boolValue_;
boolValue_ = null;
return result;
}
return readByte() == Types.BOOLEAN_TRUE;
}
/** Read a single byte off the wire. Nothing interesting here. */
@Override
public byte readByte() throws TException {
byte b;
if (trans_.getBytesRemainingInBuffer() > 0) {
b = trans_.getBuffer()[trans_.getBufferPosition()];
trans_.consumeBuffer(1);
} else {
trans_.readAll(temp, 0, 1);
b = temp[0];
}
return b;
}
/** Read an i16 from the wire as a zigzag varint. */
@Override
public short readI16() throws TException {
return (short) zigzagToInt(readVarint32());
}
/** Read an i32 from the wire as a zigzag varint. */
@Override
public int readI32() throws TException {
return zigzagToInt(readVarint32());
}
/** Read an i64 from the wire as a zigzag varint. */
@Override
public long readI64() throws TException {
return zigzagToLong(readVarint64());
}
/** No magic here - just read a double off the wire. */
@Override
public double readDouble() throws TException {
trans_.readAll(temp, 0, 8);
return Double.longBitsToDouble(bytesToLong(temp));
}
@Override
public UUID readUuid() throws TException {
trans_.readAll(temp, 0, 16);
long mostSigBits = bytesToLong(temp, 8);
long leastSigBits = bytesToLong(temp, 0);
return new UUID(mostSigBits, leastSigBits);
}
/** Reads a byte[] (via readBinary), and then UTF-8 decodes it. */
@Override
public String readString() throws TException {
int length = readVarint32();
checkStringReadLength(length);
if (length == 0) {
return "";
}
final String str;
if (trans_.getBytesRemainingInBuffer() >= length) {
str =
new String(
trans_.getBuffer(), trans_.getBufferPosition(), length, StandardCharsets.UTF_8);
trans_.consumeBuffer(length);
} else {
str = new String(readBinary(length), StandardCharsets.UTF_8);
}
return str;
}
/** Read a ByteBuffer from the wire. */
@Override
public ByteBuffer readBinary() throws TException {
int length = readVarint32();
if (length == 0) {
return EMPTY_BUFFER;
}
getTransport().checkReadBytesAvailable(length);
if (trans_.getBytesRemainingInBuffer() >= length) {
ByteBuffer bb = ByteBuffer.wrap(trans_.getBuffer(), trans_.getBufferPosition(), length);
trans_.consumeBuffer(length);
return bb;
}
byte[] buf = new byte[length];
trans_.readAll(buf, 0, length);
return ByteBuffer.wrap(buf);
}
/** Read a byte[] of a known length from the wire. */
private byte[] readBinary(int length) throws TException {
if (length == 0) return EMPTY_BYTES;
byte[] buf = new byte[length];
trans_.readAll(buf, 0, length);
return buf;
}
private void checkStringReadLength(int length) throws TException {
if (length < 0) {
throw new TProtocolException(TProtocolException.NEGATIVE_SIZE, "Negative length: " + length);
}
getTransport().checkReadBytesAvailable(length);
if (stringLengthLimit_ != NO_LENGTH_LIMIT && length > stringLengthLimit_) {
throw new TProtocolException(
TProtocolException.SIZE_LIMIT, "Length exceeded max allowed: " + length);
}
}
private void checkContainerReadLength(int length) throws TProtocolException {
if (length < 0) {
throw new TProtocolException(TProtocolException.NEGATIVE_SIZE, "Negative length: " + length);
}
if (containerLengthLimit_ != NO_LENGTH_LIMIT && length > containerLengthLimit_) {
throw new TProtocolException(
TProtocolException.SIZE_LIMIT, "Length exceeded max allowed: " + length);
}
}
//
// These methods are here for the struct to call, but don't have any wire
// encoding.
//
@Override
public void readMessageEnd() throws TException {}
@Override
public void readFieldEnd() throws TException {}
@Override
public void readMapEnd() throws TException {}
@Override
public void readListEnd() throws TException {}
@Override
public void readSetEnd() throws TException {}
//
// Internal reading methods
//
/**
* Read an i32 from the wire as a varint. The MSB of each byte is set if there is another byte to
* follow. This can read up to 5 bytes.
*/
private int readVarint32() throws TException {
int result = 0;
int shift = 0;
if (trans_.getBytesRemainingInBuffer() >= 5) {
byte[] buf = trans_.getBuffer();
int pos = trans_.getBufferPosition();
int off = 0;
while (true) {
byte b = buf[pos + off];
result |= (b & 0x7f) << shift;
if ((b & 0x80) != 0x80) break;
shift += 7;
off++;
}
trans_.consumeBuffer(off + 1);
} else {
while (true) {
byte b = readByte();
result |= (b & 0x7f) << shift;
if ((b & 0x80) != 0x80) break;
shift += 7;
}
}
return result;
}
/**
* Read an i64 from the wire as a proper varint. The MSB of each byte is set if there is another
* byte to follow. This can read up to 10 bytes.
*/
private long readVarint64() throws TException {
int shift = 0;
long result = 0;
if (trans_.getBytesRemainingInBuffer() >= 10) {
byte[] buf = trans_.getBuffer();
int pos = trans_.getBufferPosition();
int off = 0;
while (true) {
byte b = buf[pos + off];
result |= (long) (b & 0x7f) << shift;
if ((b & 0x80) != 0x80) break;
shift += 7;
off++;
}
trans_.consumeBuffer(off + 1);
} else {
while (true) {
byte b = readByte();
result |= (long) (b & 0x7f) << shift;
if ((b & 0x80) != 0x80) break;
shift += 7;
}
}
return result;
}
//
// encoding helpers
//
/** Convert from zigzag int to int. */
private int zigzagToInt(int n) {
return (n >>> 1) ^ -(n & 1);
}
/** Convert from zigzag long to long. */
private long zigzagToLong(long n) {
return (n >>> 1) ^ -(n & 1);
}
/**
* Note that it's important that the mask bytes are long literals, otherwise they'll default to
* ints, and when you shift an int left 56 bits, you just get a messed up int.
*/
private long bytesToLong(byte[] bytes) {
return bytesToLong(bytes, 0);
}
private long bytesToLong(byte[] bytes, int offset) {
return ((bytes[offset + 7] & 0xffL) << 56)
| ((bytes[offset + 6] & 0xffL) << 48)
| ((bytes[offset + 5] & 0xffL) << 40)
| ((bytes[offset + 4] & 0xffL) << 32)
| ((bytes[offset + 3] & 0xffL) << 24)
| ((bytes[offset + 2] & 0xffL) << 16)
| ((bytes[offset + 1] & 0xffL) << 8)
| ((bytes[offset + 0] & 0xffL));
}
//
// type testing and converting
//
private boolean isBoolType(byte b) {
int lowerNibble = b & 0x0f;
return lowerNibble == Types.BOOLEAN_TRUE || lowerNibble == Types.BOOLEAN_FALSE;
}
/** Given a TCompactProtocol.Types constant, convert it to its corresponding TType value. */
private byte getTType(byte type) throws TProtocolException {
switch ((byte) (type & 0x0f)) {
case TType.STOP:
return TType.STOP;
case Types.BOOLEAN_FALSE:
case Types.BOOLEAN_TRUE:
return TType.BOOL;
case Types.BYTE:
return TType.BYTE;
case Types.I16:
return TType.I16;
case Types.I32:
return TType.I32;
case Types.I64:
return TType.I64;
case Types.UUID:
return TType.UUID;
case Types.DOUBLE:
return TType.DOUBLE;
case Types.BINARY:
return TType.STRING;
case Types.LIST:
return TType.LIST;
case Types.SET:
return TType.SET;
case Types.MAP:
return TType.MAP;
case Types.STRUCT:
return TType.STRUCT;
default:
throw new TProtocolException("don't know what type: " + (byte) (type & 0x0f));
}
}
/** Given a TType value, find the appropriate TCompactProtocol.Types constant. */
private byte getCompactType(byte ttype) {
return ttypeToCompactType[ttype];
}
/** Return the minimum number of bytes a type will consume on the wire */
@Override
public int getMinSerializedSize(byte type) throws TTransportException {
switch (type) {
case 0:
return 1; // Stop - T_STOP needs to count itself
case 1:
return 1; // Void - T_VOID needs to count itself
case 2:
return 1; // Bool sizeof(byte)
case 3:
return 1; // Byte sizeof(byte)
case 4:
return 8; // Double sizeof(double)
case 6:
return 1; // I16 sizeof(byte)
case 8:
return 1; // I32 sizeof(byte)
case 10:
return 1; // I64 sizeof(byte)
case 11:
return 1; // string length sizeof(byte)
case 12:
return 1; // empty struct needs at least 1 byte for the T_STOP
case 13:
return 1; // element count Map sizeof(byte)
case 14:
return 1; // element count Set sizeof(byte)
case 15:
return 1; // element count List sizeof(byte)
default:
throw new TTransportException(TTransportException.UNKNOWN, "unrecognized type code");
}
}
// -----------------------------------------------------------------
// Additional methods to improve performance.
@Override
public int readFieldBeginData() throws TException {
byte type = readByte();
// if it's a stop, then we can return immediately, as the struct is over.
if (type == TType.STOP) {
return TFieldData.encode(type);
}
return TFieldData.encode(getTType((byte) (type & 0x0f)), readFieldId(type));
}
// Only makes sense to be called by readFieldBegin and readFieldBeginData
private short readFieldId(byte type) throws TException {
short fieldId;
// mask off the 4 MSB of the type header. it could contain a field id delta.
short modifier = (short) ((type & 0xf0) >> 4);
if (modifier == 0) {
// not a delta. look ahead for the zigzag varint field id.
fieldId = readI16();
} else {
// has a delta. add the delta to the last read field id.
fieldId = (short) (lastFieldId_ + modifier);
}
// if this happens to be a boolean field, the value is encoded in the type
if (isBoolType(type)) {
// save the boolean value in a special instance variable.
boolValue_ = (byte) (type & 0x0f) == Types.BOOLEAN_TRUE ? Boolean.TRUE : Boolean.FALSE;
}
// push the new field onto the field stack so we can keep the deltas going.
lastFieldId_ = fieldId;
return fieldId;
}
@Override
protected void skipBinary() throws TException {
int size = intToZigZag(readI32());
this.skipBytes(size);
}
}
| Types |
java | google__guava | android/guava-tests/test/com/google/common/cache/PackageSanityTests.java | {
"start": 841,
"end": 1301
} | class ____ extends AbstractPackageSanityTests {
public PackageSanityTests() {
setDefault(
CacheLoader.class,
new CacheLoader<Object, Object>() {
@Override
public Object load(Object key) {
return key;
}
});
setDefault(LocalCache.class, new LocalCache<Object, Object>(CacheBuilder.newBuilder(), null));
setDefault(CacheBuilder.class, CacheBuilder.newBuilder());
}
}
| PackageSanityTests |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/ctx/DatasourceFactory.java | {
"start": 1118,
"end": 2614
} | class ____ {
@Context
@Requires(condition = JdbcDataSourceEnabled.class)
@EachBean(DataSourceConfiguration.class)
public DataSource dataSource(DataSourceConfiguration datasourceConfiguration) {
return new DataSource() {
@Override
public Connection getConnection() throws SQLException {
return null;
}
@Override
public Connection getConnection(String username, String password) throws SQLException {
return null;
}
@Override
public PrintWriter getLogWriter() throws SQLException {
return null;
}
@Override
public void setLogWriter(PrintWriter out) throws SQLException {
}
@Override
public void setLoginTimeout(int seconds) throws SQLException {
}
@Override
public int getLoginTimeout() throws SQLException {
return 0;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return false;
}
@Override
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
return null;
}
};
}
}
| DatasourceFactory |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/proxy/LazyToOnesNoProxyFactoryWithSubclassesStatefulTest.java | {
"start": 7729,
"end": 8350
} | class ____ {
@Id
private String id;
@ManyToOne(fetch = FetchType.LAZY)
private Animal animal = null;
@ManyToOne(fetch = FetchType.LAZY)
private Primate primate = null;
@ManyToOne(fetch = FetchType.LAZY)
private Human human = null;
@ManyToOne(fetch = FetchType.LAZY)
private Human otherHuman = null;
protected OtherEntity() {
// this form used by Hibernate
}
public OtherEntity(String id) {
this.id = id;
}
public String getId() {
return id;
}
public Human getHuman() {
return human;
}
public void setHuman(Human human) {
this.human = human;
}
}
}
| OtherEntity |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/ast/tree/AbstractUpdateOrDeleteStatement.java | {
"start": 482,
"end": 1553
} | class ____ extends AbstractMutationStatement {
private final FromClause fromClause;
private final Predicate restriction;
public AbstractUpdateOrDeleteStatement(
NamedTableReference targetTable,
FromClause fromClause,
Predicate restriction) {
this( null, targetTable, fromClause, restriction, Collections.emptyList() );
}
public AbstractUpdateOrDeleteStatement(
NamedTableReference targetTable,
FromClause fromClause,
Predicate restriction,
List<ColumnReference> returningColumns) {
this( null, targetTable, fromClause, restriction, returningColumns );
}
public AbstractUpdateOrDeleteStatement(
CteContainer cteContainer,
NamedTableReference targetTable,
FromClause fromClause,
Predicate restriction,
List<ColumnReference> returningColumns) {
super( cteContainer, targetTable, returningColumns );
this.fromClause = fromClause;
this.restriction = restriction;
}
public FromClause getFromClause() {
return fromClause;
}
public Predicate getRestriction() {
return restriction;
}
}
| AbstractUpdateOrDeleteStatement |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/BridgeMethodResolverTests.java | {
"start": 31431,
"end": 31765
} | class ____ extends AbstractDomainObject<ParameterType, byte[]> {
@Override
public byte[] method1(ParameterType p) {
return super.method1(p);
}
@Override
public void method2(ParameterType p, byte[] r) {
super.method2(p, r);
}
}
//-------------------
// SPR-3534 classes
//-------------------
public | DomainObject |
java | apache__kafka | connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java | {
"start": 2233,
"end": 6336
} | class ____<R extends ConnectRecord<R>> implements Transformation<R>, Versioned {
public static final String OVERVIEW_DOC =
"Convert timestamps between different formats such as Unix epoch, strings, and Connect Date/Timestamp types."
+ "Applies to individual fields or to the entire value."
+ "<p/>Use the concrete transformation type designed for the record key (<code>" + TimestampConverter.Key.class.getName() + "</code>) "
+ "or value (<code>" + TimestampConverter.Value.class.getName() + "</code>).";
public static final String FIELD_CONFIG = "field";
private static final String FIELD_DEFAULT = "";
public static final String TARGET_TYPE_CONFIG = "target.type";
public static final String FORMAT_CONFIG = "format";
private static final String FORMAT_DEFAULT = "";
public static final String UNIX_PRECISION_CONFIG = "unix.precision";
private static final String UNIX_PRECISION_DEFAULT = "milliseconds";
public static final String REPLACE_NULL_WITH_DEFAULT_CONFIG = "replace.null.with.default";
private static final String PURPOSE = "converting timestamp formats";
private static final String TYPE_STRING = "string";
private static final String TYPE_UNIX = "unix";
private static final String TYPE_DATE = "Date";
private static final String TYPE_TIME = "Time";
private static final String TYPE_TIMESTAMP = "Timestamp";
private static final String UNIX_PRECISION_MILLIS = "milliseconds";
private static final String UNIX_PRECISION_MICROS = "microseconds";
private static final String UNIX_PRECISION_NANOS = "nanoseconds";
private static final String UNIX_PRECISION_SECONDS = "seconds";
private static final TimeZone UTC = TimeZone.getTimeZone("UTC");
public static final Schema OPTIONAL_DATE_SCHEMA = org.apache.kafka.connect.data.Date.builder().optional().schema();
public static final Schema OPTIONAL_TIMESTAMP_SCHEMA = Timestamp.builder().optional().schema();
public static final Schema OPTIONAL_TIME_SCHEMA = Time.builder().optional().schema();
public static final ConfigDef CONFIG_DEF = new ConfigDef()
.define(FIELD_CONFIG, ConfigDef.Type.STRING, FIELD_DEFAULT, ConfigDef.Importance.HIGH,
"The field containing the timestamp, or empty if the entire value is a timestamp")
.define(TARGET_TYPE_CONFIG, ConfigDef.Type.STRING, ConfigDef.NO_DEFAULT_VALUE,
ConfigDef.ValidString.in(TYPE_STRING, TYPE_UNIX, TYPE_DATE, TYPE_TIME, TYPE_TIMESTAMP),
ConfigDef.Importance.HIGH,
"The desired timestamp representation: string, unix, Date, Time, or Timestamp")
.define(FORMAT_CONFIG, ConfigDef.Type.STRING, FORMAT_DEFAULT, ConfigDef.Importance.MEDIUM,
"A SimpleDateFormat-compatible format for the timestamp. Used to generate the output when type=string "
+ "or used to parse the input if the input is a string.")
.define(UNIX_PRECISION_CONFIG, ConfigDef.Type.STRING, UNIX_PRECISION_DEFAULT,
ConfigDef.ValidString.in(
UNIX_PRECISION_NANOS, UNIX_PRECISION_MICROS,
UNIX_PRECISION_MILLIS, UNIX_PRECISION_SECONDS),
ConfigDef.Importance.LOW,
"The desired Unix precision for the timestamp: seconds, milliseconds, microseconds, or nanoseconds. " +
"Used to generate the output when type=unix or used to parse the input if the input is a Long." +
"Note: This SMT will cause precision loss during conversions from, and to, values with sub-millisecond components.")
.define(REPLACE_NULL_WITH_DEFAULT_CONFIG, ConfigDef.Type.BOOLEAN, true, ConfigDef.Importance.MEDIUM,
"Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used.");
private | TimestampConverter |
java | quarkusio__quarkus | extensions/mongodb-client/runtime/src/test/java/io/quarkus/mongodb/reactive/DatabaseRunCommandTest.java | {
"start": 389,
"end": 1596
} | class ____ extends MongoTestBase {
private ReactiveMongoClient client;
@BeforeEach
void init() {
client = new ReactiveMongoClientImpl(MongoClients.create(getConnectionString()));
}
@AfterEach
void cleanup() {
client.getDatabase(DATABASE).drop().await().indefinitely();
client.close();
}
@Test
void run() {
Document info = client.getDatabase(DATABASE).runCommand(new Document("buildInfo", 1)).await().indefinitely();
assertThat(info.getDouble("ok")).isEqualTo(1.0);
info = client.getDatabase(DATABASE).runCommand(new Document("buildInfo", 1), Document.class)
.await().indefinitely();
assertThat(info.getDouble("ok")).isEqualTo(1.0);
info = client.getDatabase(DATABASE).runCommand(new Document("buildInfo", 1), ReadPreference.nearest())
.await().indefinitely();
assertThat(info.getDouble("ok")).isEqualTo(1.0);
info = client.getDatabase(DATABASE).runCommand(new Document("buildInfo", 1), ReadPreference.nearest(), Document.class)
.await().indefinitely();
assertThat(info.getDouble("ok")).isEqualTo(1.0);
}
}
| DatabaseRunCommandTest |
java | google__dagger | javatests/dagger/internal/codegen/ComponentProcessorTest.java | {
"start": 71009,
"end": 71356
} | interface ____ {",
" ChildInterface child(ChildModule childModule);",
" }",
"}");
Source parent =
CompilerTests.javaSource(
"test.Parent",
"package test;",
"",
"import dagger.Component;",
"",
"@Component",
" | Factory |
java | spring-projects__spring-security | kerberos/kerberos-core/src/main/java/org/springframework/security/kerberos/authentication/KerberosClient.java | {
"start": 786,
"end": 877
} | interface ____ {
JaasSubjectHolder login(String username, String password);
}
| KerberosClient |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/eventtime/WatermarkStrategyTest.java | {
"start": 1377,
"end": 7403
} | class ____ {
@Test
void testDefaultTimeStampAssigner() {
WatermarkStrategy<Object> wmStrategy = WatermarkStrategy.forMonotonousTimestamps();
// ensure that the closure can be cleaned through the Watermark Strategies
ClosureCleaner.clean(wmStrategy, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
assertThat(wmStrategy.createTimestampAssigner(assignerContext()))
.isInstanceOf(RecordTimestampAssigner.class);
}
@Test
void testLambdaTimestampAssigner() {
WatermarkStrategy<Object> wmStrategy =
WatermarkStrategy.forMonotonousTimestamps()
.withTimestampAssigner((event, timestamp) -> 42L);
// ensure that the closure can be cleaned through the Watermark Strategies
ClosureCleaner.clean(wmStrategy, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
TimestampAssigner<Object> timestampAssigner =
wmStrategy.createTimestampAssigner(assignerContext());
assertThat(timestampAssigner.extractTimestamp(null, 13L)).isEqualTo(42L);
}
@Test
void testLambdaTimestampAssignerSupplier() {
WatermarkStrategy<Object> wmStrategy =
WatermarkStrategy.forMonotonousTimestamps()
.withTimestampAssigner(
TimestampAssignerSupplier.of((event, timestamp) -> 42L));
// ensure that the closure can be cleaned through the Watermark Strategies
ClosureCleaner.clean(wmStrategy, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
TimestampAssigner<Object> timestampAssigner =
wmStrategy.createTimestampAssigner(assignerContext());
assertThat(timestampAssigner.extractTimestamp(null, 13L)).isEqualTo(42L);
}
@Test
void testAnonymousInnerTimestampAssigner() {
WatermarkStrategy<Object> wmStrategy =
WatermarkStrategy.forMonotonousTimestamps()
.withTimestampAssigner(
(SerializableTimestampAssigner<Object>)
(element, recordTimestamp) -> 42);
// ensure that the closure can be cleaned through the Watermark Strategies
ClosureCleaner.clean(wmStrategy, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
TimestampAssigner<Object> timestampAssigner =
wmStrategy.createTimestampAssigner(assignerContext());
assertThat(timestampAssigner.extractTimestamp(null, 13L)).isEqualTo(42L);
}
@Test
void testClassTimestampAssigner() {
WatermarkStrategy<Object> wmStrategy =
WatermarkStrategy.forMonotonousTimestamps()
.withTimestampAssigner((ctx) -> new TestTimestampAssigner());
// ensure that the closure can be cleaned through the Watermark Strategies
ClosureCleaner.clean(wmStrategy, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
TimestampAssigner<Object> timestampAssigner =
wmStrategy.createTimestampAssigner(assignerContext());
assertThat(timestampAssigner.extractTimestamp(null, 13L)).isEqualTo(42L);
}
@Test
void testClassTimestampAssignerUsingSupplier() {
WatermarkStrategy<Object> wmStrategy =
WatermarkStrategy.forMonotonousTimestamps()
.withTimestampAssigner((context) -> new TestTimestampAssigner());
// ensure that the closure can be cleaned through the Watermark Strategies
ClosureCleaner.clean(wmStrategy, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
TimestampAssigner<Object> timestampAssigner =
wmStrategy.createTimestampAssigner(assignerContext());
assertThat(timestampAssigner.extractTimestamp(null, 13L)).isEqualTo(42L);
}
@Test
void testWithIdlenessHelper() {
WatermarkStrategy<String> wmStrategy =
WatermarkStrategy.<String>forMonotonousTimestamps()
.withIdleness(Duration.ofDays(7));
// ensure that the closure can be cleaned
ClosureCleaner.clean(wmStrategy, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
assertThat(wmStrategy.createTimestampAssigner(assignerContext()))
.isInstanceOf(RecordTimestampAssigner.class);
assertThat(wmStrategy.createWatermarkGenerator(generatorContext()))
.isInstanceOf(WatermarksWithIdleness.class);
}
@Test
void testWithWatermarkAlignment() {
final String watermarkGroup = "group-1";
final Duration maxAllowedWatermarkDrift = Duration.ofMillis(200);
final WatermarkStrategy<String> strategy =
WatermarkStrategy.<String>forMonotonousTimestamps()
.withWatermarkAlignment(watermarkGroup, maxAllowedWatermarkDrift)
// we call a different builder method on top of watermark alignment
// to make sure it can be properly mixed
.withIdleness(Duration.ofMillis(200));
// ensure that the closure can be cleaned
ClosureCleaner.clean(strategy, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
final WatermarkAlignmentParams alignmentParameters = strategy.getAlignmentParameters();
assertThat(alignmentParameters.getWatermarkGroup()).isEqualTo(watermarkGroup);
assertThat(alignmentParameters.getMaxAllowedWatermarkDrift())
.isEqualTo(maxAllowedWatermarkDrift.toMillis());
assertThat(alignmentParameters.getUpdateInterval())
.isEqualTo(WatermarksWithWatermarkAlignment.DEFAULT_UPDATE_INTERVAL.toMillis());
assertThat(strategy.createTimestampAssigner(assignerContext()))
.isInstanceOf(RecordTimestampAssigner.class);
assertThat(strategy.createWatermarkGenerator(generatorContext()))
.isInstanceOf(WatermarksWithIdleness.class);
}
static | WatermarkStrategyTest |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/spi/MappingExclusionProvider.java | {
"start": 2333,
"end": 2731
} | class ____ won't try to automatically map it with some other type.
* The given {@code typeElement} will be excluded from the automatic sub-mapping generation
*
* @param typeElement that needs to be checked
* @return {@code true} if MapStruct should exclude the provided {@link TypeElement} from an automatic sub-mapping
*/
boolean isExcluded(TypeElement typeElement);
}
| and |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueueTest.java | {
"start": 3483,
"end": 9925
} | class ____ {
private static final int BUFFER_SIZE = 1024 * 1024;
private static FileChannelManager fileChannelManager;
@BeforeAll
static void setUp(@TempDir File temporaryFolder) {
fileChannelManager =
new FileChannelManagerImpl(
new String[] {temporaryFolder.getAbsolutePath()}, "testing");
}
@AfterAll
static void shutdown() throws Exception {
fileChannelManager.close();
}
/** Test that PartitionNotFound message will be sent to downstream in notifying timeout. */
@Test
public void testNotifyReaderPartitionTimeout() throws Exception {
PartitionRequestQueue queue = new PartitionRequestQueue();
EmbeddedChannel channel = new EmbeddedChannel(queue);
ResultPartitionManager resultPartitionManager = new ResultPartitionManager();
ResultPartitionID resultPartitionId = new ResultPartitionID();
CreditBasedSequenceNumberingViewReader reader =
new CreditBasedSequenceNumberingViewReader(new InputChannelID(0, 0), 10, queue);
reader.requestSubpartitionViewOrRegisterListener(
resultPartitionManager, resultPartitionId, new ResultSubpartitionIndexSet(0));
assertThat(
resultPartitionManager
.getListenerManagers()
.get(resultPartitionId)
.getPartitionRequestListeners())
.hasSize(1);
reader.notifyPartitionRequestTimeout(
resultPartitionManager
.getListenerManagers()
.get(resultPartitionId)
.getPartitionRequestListeners()
.iterator()
.next());
channel.runPendingTasks();
Object read = channel.readOutbound();
assertThat(read)
.isNotNull()
.isInstanceOf(NettyMessage.ErrorResponse.class)
.isInstanceOfSatisfying(
NettyMessage.ErrorResponse.class,
r -> assertThat(r.cause).isInstanceOf(PartitionNotFoundException.class));
}
/**
* In case of enqueuing an empty reader and a reader that actually has some buffers when channel
* is not writable, on channelWritability change event should result in reading all of the
* messages.
*/
@Test
void testNotifyReaderNonEmptyOnEmptyReaders() throws Exception {
final int buffersToWrite = 5;
PartitionRequestQueue queue = new PartitionRequestQueue();
EmbeddedChannel channel = new EmbeddedChannel(queue);
CreditBasedSequenceNumberingViewReader reader1 =
new CreditBasedSequenceNumberingViewReader(new InputChannelID(0, 0), 10, queue);
CreditBasedSequenceNumberingViewReader reader2 =
new CreditBasedSequenceNumberingViewReader(new InputChannelID(1, 1), 10, queue);
ResultSubpartitionView view1 = new EmptyAlwaysAvailableResultSubpartitionView();
reader1.notifySubpartitionsCreated(
TestingResultPartition.newBuilder()
.setCreateSubpartitionViewFunction((index, listener) -> view1)
.build(),
new ResultSubpartitionIndexSet(0));
reader1.notifyDataAvailable(view1);
assertThat(reader1.getAvailabilityAndBacklog().isAvailable()).isTrue();
assertThat(reader1.isRegisteredAsAvailable()).isFalse();
channel.unsafe().outboundBuffer().setUserDefinedWritability(1, false);
assertThat(channel.isWritable()).isFalse();
reader1.notifyDataAvailable(view1);
channel.runPendingTasks();
ResultSubpartitionView view2 = new DefaultBufferResultSubpartitionView(buffersToWrite);
reader2.notifyDataAvailable(view2);
reader2.notifySubpartitionsCreated(
TestingResultPartition.newBuilder()
.setCreateSubpartitionViewFunction((index, listener) -> view2)
.build(),
new ResultSubpartitionIndexSet(0));
assertThat(reader2.getAvailabilityAndBacklog().isAvailable()).isTrue();
assertThat(reader2.isRegisteredAsAvailable()).isFalse();
reader2.notifyDataAvailable(view2);
// changing a channel writability should result in draining both reader1 and reader2
channel.unsafe().outboundBuffer().setUserDefinedWritability(1, true);
channel.runPendingTasks();
assertThat(channel.outboundMessages()).hasSize(buffersToWrite);
}
/** Tests {@link PartitionRequestQueue} buffer writing with default buffers. */
@Test
void testDefaultBufferWriting() throws Exception {
testBufferWriting(new DefaultBufferResultSubpartitionView(1));
}
/** Tests {@link PartitionRequestQueue} buffer writing with read-only buffers. */
@Test
void testReadOnlyBufferWriting() throws Exception {
testBufferWriting(new ReadOnlyBufferResultSubpartitionView(1));
}
private void testBufferWriting(ResultSubpartitionView view) throws IOException {
// setup
ResultPartition partition =
TestingResultPartition.newBuilder()
.setCreateSubpartitionViewFunction((index, listener) -> view)
.build();
final InputChannelID receiverId = new InputChannelID();
final PartitionRequestQueue queue = new PartitionRequestQueue();
final CreditBasedSequenceNumberingViewReader reader =
new CreditBasedSequenceNumberingViewReader(receiverId, Integer.MAX_VALUE, queue);
final EmbeddedChannel channel = new EmbeddedChannel(queue);
reader.notifySubpartitionsCreated(partition, new ResultSubpartitionIndexSet(0));
// notify about buffer availability and encode one buffer
reader.notifyDataAvailable(view);
channel.runPendingTasks();
Object read = channel.readOutbound();
assertThat(read).isNotNull();
if (read instanceof NettyMessage.ErrorResponse) {
((NettyMessage.ErrorResponse) read).cause.printStackTrace();
}
assertThat(read).isInstanceOf(NettyMessage.BufferResponse.class);
read = channel.readOutbound();
assertThat(read).isNull();
}
private static | PartitionRequestQueueTest |
java | apache__camel | components/camel-mongodb-gridfs/src/main/java/org/apache/camel/component/mongodb/gridfs/GridFsEndpoint.java | {
"start": 1936,
"end": 9358
} | class ____ extends DefaultEndpoint {
private static final Logger LOG = LoggerFactory.getLogger(GridFsEndpoint.class);
@UriPath
@Metadata(required = true)
private String connectionBean;
@UriParam
@Metadata(required = true)
private String database;
@UriParam(defaultValue = "fs")
private String bucket;
@UriParam(enums = "ACKNOWLEDGED,W1,W2,W3,UNACKNOWLEDGED,JOURNALED,MAJORITY")
private WriteConcern writeConcern;
@UriParam
private ReadPreference readPreference;
@UriParam(label = "producer")
private String operation;
@UriParam(label = "consumer")
private String query;
@UriParam(label = "consumer", defaultValue = "1000", javaType = "java.time.Duration")
private long initialDelay = 1000;
@UriParam(label = "consumer", defaultValue = "500", javaType = "java.time.Duration")
private long delay = 500;
@UriParam(label = "consumer", defaultValue = "TimeStamp")
private QueryStrategy queryStrategy = QueryStrategy.TimeStamp;
@UriParam(label = "consumer", defaultValue = "camel-timestamps")
private String persistentTSCollection = "camel-timestamps";
@UriParam(label = "consumer", defaultValue = "camel-timestamp")
private String persistentTSObject = "camel-timestamp";
@UriParam(label = "consumer", defaultValue = "camel-processed")
private String fileAttributeName = "camel-processed";
private MongoClient mongoConnection;
private MongoDatabase db;
private GridFSBucket gridFSBucket;
private MongoCollection<GridFSFile> filesCollection;
public GridFsEndpoint(String uri, GridFsComponent component) {
super(uri, component);
}
@Override
public Producer createProducer() throws Exception {
initializeConnection();
return new GridFsProducer(this);
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
initializeConnection();
return new GridFsConsumer(this, processor);
}
public void initializeConnection() throws Exception {
LOG.info("Initialize GridFS endpoint: {}", this);
if (database == null) {
throw new IllegalStateException("Missing required endpoint configuration: database");
}
db = mongoConnection.getDatabase(database);
if (db == null) {
throw new IllegalStateException("Could not initialize GridFsComponent. Database " + database + " does not exist.");
}
if (bucket != null) {
gridFSBucket = GridFSBuckets.create(db, bucket);
} else {
gridFSBucket = GridFSBuckets.create(db);
}
this.filesCollection = db.getCollection(gridFSBucket.getBucketName() + ".files", GridFSFile.class);
}
@Override
protected void doInit() throws Exception {
mongoConnection = CamelContextHelper.mandatoryLookup(getCamelContext(), connectionBean, MongoClient.class);
LOG.debug("Resolved the connection with the name {} as {}", connectionBean, mongoConnection);
setWriteReadOptionsOnConnection();
super.doInit();
}
@Override
protected void doShutdown() throws Exception {
super.doShutdown();
if (mongoConnection != null) {
LOG.debug("Closing connection");
mongoConnection.close();
}
}
private void setWriteReadOptionsOnConnection() {
// Set the WriteConcern
if (writeConcern != null) {
db = db.withWriteConcern(writeConcern);
}
// Set the ReadPreference
if (readPreference != null) {
db = db.withReadPreference(readPreference);
}
}
// ======= Getters and setters ===============================================
public String getConnectionBean() {
return connectionBean;
}
/**
* Name of {@link com.mongodb.client.MongoClient} to use.
*/
public void setConnectionBean(String connectionBean) {
this.connectionBean = connectionBean;
}
public MongoClient getMongoConnection() {
return mongoConnection;
}
/**
* Sets the Mongo instance that represents the backing connection
*
* @param mongoConnection the connection to the database
*/
public void setMongoConnection(MongoClient mongoConnection) {
this.mongoConnection = mongoConnection;
}
public MongoDatabase getDB() {
return db;
}
public String getDatabase() {
return database;
}
/**
* Sets the name of the MongoDB database to target
*
* @param database name of the MongoDB database
*/
public void setDatabase(String database) {
this.database = database;
}
/**
* Sets the name of the GridFS bucket within the database. Default is fs.
*/
public String getBucket() {
return bucket;
}
public void setBucket(String bucket) {
this.bucket = bucket;
}
public String getQuery() {
return query;
}
/**
* Additional query parameters (in JSON) that are used to configure the query used for finding files in the
* GridFsConsumer
*/
public void setQuery(String query) {
this.query = query;
}
public long getDelay() {
return delay;
}
/**
* Sets the delay between polls within the Consumer. Default is 500ms
*/
public void setDelay(long delay) {
this.delay = delay;
}
public long getInitialDelay() {
return initialDelay;
}
/**
* Sets the initialDelay before the consumer will start polling. Default is 1000ms
*/
public void setInitialDelay(long initialDelay) {
this.initialDelay = initialDelay;
}
/**
* Sets the QueryStrategy that is used for polling for new files. Default is Timestamp
*/
public void setQueryStrategy(String s) {
queryStrategy = QueryStrategy.valueOf(s);
}
/**
* Sets the QueryStrategy that is used for polling for new files. Default is Timestamp
*/
public void setQueryStrategy(QueryStrategy queryStrategy) {
this.queryStrategy = queryStrategy;
}
public QueryStrategy getQueryStrategy() {
return queryStrategy;
}
/**
* If the QueryType uses a persistent timestamp, this sets the name of the collection within the DB to store the
* timestamp.
*/
public void setPersistentTSCollection(String s) {
persistentTSCollection = s;
}
public String getPersistentTSCollection() {
return persistentTSCollection;
}
/**
* If the QueryType uses a persistent timestamp, this is the ID of the object in the collection to store the
* timestamp.
*/
public void setPersistentTSObject(String id) {
persistentTSObject = id;
}
public String getPersistentTSObject() {
return persistentTSObject;
}
/**
* If the QueryType uses a FileAttribute, this sets the name of the attribute that is used. Default is
* "camel-processed".
*/
public void setFileAttributeName(String f) {
fileAttributeName = f;
}
public String getFileAttributeName() {
return fileAttributeName;
}
/**
* Set the {@link WriteConcern} for write operations on MongoDB using the standard ones. Resolved from the fields of
* the WriteConcern | GridFsEndpoint |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java | {
"start": 5005,
"end": 28158
} | enum ____ {
RUNNING(
new TaskState[]{TaskState.RUNNING}),
PENDING(new TaskState[]{TaskState.SCHEDULED}),
COMPLETED(new TaskState[]{TaskState.SUCCEEDED, TaskState.FAILED, TaskState.KILLED});
private final List<TaskState> correspondingStates;
private TaskStateUI(TaskState[] correspondingStates) {
this.correspondingStates = Arrays.asList(correspondingStates);
}
public boolean correspondsTo(TaskState state) {
return this.correspondingStates.contains(state);
}
}
public static TaskType taskType(String symbol) {
// JDK 7 supports switch on strings
if (symbol.equals("m")) return TaskType.MAP;
if (symbol.equals("r")) return TaskType.REDUCE;
throw new YarnRuntimeException("Unknown task symbol: "+ symbol);
}
public static TaskAttemptStateUI taskAttemptState(String attemptStateStr) {
return TaskAttemptStateUI.valueOf(attemptStateStr);
}
public static TaskStateUI taskState(String taskStateStr) {
return TaskStateUI.valueOf(taskStateStr);
}
// gets the base name of the MapReduce framework or null if no
// framework was configured
private static String getMRFrameworkName(Configuration conf) {
String frameworkName = null;
String framework =
conf.get(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, "");
if (!framework.isEmpty()) {
URI uri;
try {
uri = new URI(framework);
} catch (URISyntaxException e) {
throw new IllegalArgumentException("Unable to parse '" + framework
+ "' as a URI, check the setting for "
+ MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, e);
}
frameworkName = uri.getFragment();
if (frameworkName == null) {
frameworkName = new Path(uri).getName();
}
}
return frameworkName;
}
private static void setMRFrameworkClasspath(
Map<String, String> environment, Configuration conf) throws IOException {
// Propagate the system classpath when using the mini cluster
if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
MRApps.addToEnvironment(environment, Environment.CLASSPATH.name(),
System.getProperty("java.class.path"), conf);
}
boolean crossPlatform =
conf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,
MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM);
// if the framework is specified then only use the MR classpath
String frameworkName = getMRFrameworkName(conf);
if (frameworkName == null) {
// Add standard Hadoop classes
for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
crossPlatform
? YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH
: YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
MRApps.addToEnvironment(environment, Environment.CLASSPATH.name(),
c.trim(), conf);
}
}
boolean foundFrameworkInClasspath = (frameworkName == null);
for (String c : conf.getStrings(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,
crossPlatform ?
StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_CROSS_PLATFORM_APPLICATION_CLASSPATH)
: StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH))) {
MRApps.addToEnvironment(environment, Environment.CLASSPATH.name(),
c.trim(), conf);
if (!foundFrameworkInClasspath) {
foundFrameworkInClasspath = c.contains(frameworkName);
}
}
if (!foundFrameworkInClasspath) {
throw new IllegalArgumentException(
"Could not locate MapReduce framework name '" + frameworkName
+ "' in " + MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH);
}
// TODO: Remove duplicates.
}
  /**
   * Populates the classpath entry of the container launch {@code environment}
   * with everything a task / AM container needs: the container working
   * directory, the job jar and its {@code classes/} and {@code lib/} contents,
   * a working-directory wildcard, non-jar distributed-cache entries, and the
   * MR framework classpath.
   *
   * @param environment the container launch environment map to update
   * @param conf job configuration consulted for classpath-related settings
   * @throws IOException if a distributed-cache path cannot be resolved
   */
  @SuppressWarnings("deprecation")
  public static void setClasspath(Map<String, String> environment,
      Configuration conf) throws IOException {
    // When user classes take precedence, the framework classpath is appended
    // last (below) instead of first, so user jars win class resolution.
    boolean userClassesTakesPrecedence =
      conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false);
    // With the isolating job classloader enabled, user entries go into a
    // separate APP_CLASSPATH variable instead of the system CLASSPATH.
    String classpathEnvVar =
      conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, false)
        ? Environment.APP_CLASSPATH.name() : Environment.CLASSPATH.name();
    MRApps.addToEnvironment(environment,
      classpathEnvVar, crossPlatformifyMREnv(conf, Environment.PWD), conf);
    if (!userClassesTakesPrecedence) {
      MRApps.setMRFrameworkClasspath(environment, conf);
    }
    /*
     * We use "*" for the name of the JOB_JAR instead of MRJobConfig.JOB_JAR for
     * the case where the job jar is not necessarily named "job.jar". This can
     * happen, for example, when the job is leveraging a resource from the YARN
     * shared cache.
     */
    MRApps.addToEnvironment(
        environment,
        classpathEnvVar,
        MRJobConfig.JOB_JAR + Path.SEPARATOR + "*", conf);
    MRApps.addToEnvironment(
        environment,
        classpathEnvVar,
        MRJobConfig.JOB_JAR + Path.SEPARATOR + "classes" + Path.SEPARATOR, conf);
    MRApps.addToEnvironment(
        environment,
        classpathEnvVar,
        MRJobConfig.JOB_JAR + Path.SEPARATOR + "lib" + Path.SEPARATOR + "*", conf);
    MRApps.addToEnvironment(
        environment,
        classpathEnvVar,
        crossPlatformifyMREnv(conf, Environment.PWD) + Path.SEPARATOR + "*", conf);
    // a * in the classpath will only find a .jar, so we need to filter out
    // all .jars and add everything else
    addToClasspathIfNotJar(JobContextImpl.getFileClassPaths(conf),
        JobContextImpl.getCacheFiles(conf),
        conf,
        environment, classpathEnvVar);
    addToClasspathIfNotJar(JobContextImpl.getArchiveClassPaths(conf),
        JobContextImpl.getCacheArchives(conf),
        conf,
        environment, classpathEnvVar);
    if (userClassesTakesPrecedence) {
      MRApps.setMRFrameworkClasspath(environment, conf);
    }
  }
  /**
   * Add the paths to the classpath if they are not jars
   * @param paths the paths to add to the classpath
   * @param withLinks the corresponding paths that may have a link name in them
   * @param conf used to resolve the paths
   * @param environment the environment to update CLASSPATH in
   * @param classpathEnvVar the environment variable to append entries to
   *        (CLASSPATH, or APP_CLASSPATH when the job classloader is enabled)
   * @throws IOException if there is an error resolving any of the paths.
   */
  private static void addToClasspathIfNotJar(Path[] paths,
      URI[] withLinks, Configuration conf,
      Map<String, String> environment,
      String classpathEnvVar) throws IOException {
    if (paths != null) {
      // Maps each fully-resolved cache path to the symlink name it will have
      // in the container working directory, for non-jar entries only.
      HashMap<Path, String> linkLookup = new HashMap<Path, String>();
      if (withLinks != null) {
        for (URI u: withLinks) {
          Path p = new Path(u);
          FileSystem remoteFS = p.getFileSystem(conf);
          String name = p.getName();
          String wildcard = null;
          // If the path is wildcarded, resolve its parent directory instead
          if (name.equals(DistributedCache.WILDCARD)) {
            wildcard = name;
            p = p.getParent();
          }
          // Fully qualify and resolve (e.g. follow symlinks) so that paths
          // compare equal in the lookup below regardless of how they were
          // originally spelled.
          p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
              remoteFS.getWorkingDirectory()));
          if ((wildcard != null) && (u.getFragment() != null)) {
            throw new IOException("Invalid path URI: " + p + " - cannot "
                + "contain both a URI fragment and a wildcard");
          } else if (wildcard != null) {
            name = p.getName() + Path.SEPARATOR + wildcard;
          } else if (u.getFragment() != null) {
            // A URI fragment, when present, overrides the file name as the
            // symlink (link) name.
            name = u.getFragment();
          }
          // If it's not a JAR, add it to the link lookup.
          if (!StringUtils.toLowerCase(name).endsWith(".jar")) {
            String old = linkLookup.put(p, name);
            if ((old != null) && !name.equals(old)) {
              LOG.warn("The same path is included more than once "
                  + "with different links or wildcards: " + p + " [" +
                  name + ", " + old + "]");
            }
          }
        }
      }
      for (Path p : paths) {
        FileSystem remoteFS = p.getFileSystem(conf);
        p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
            remoteFS.getWorkingDirectory()));
        String name = linkLookup.get(p);
        if (name == null) {
          name = p.getName();
        }
        // Jars are already covered by the "*" wildcard on the classpath;
        // only non-jar entries need to be appended explicitly.
        if(!StringUtils.toLowerCase(name).endsWith(".jar")) {
          MRApps.addToEnvironment(
              environment,
              classpathEnvVar,
              crossPlatformifyMREnv(conf, Environment.PWD) + Path.SEPARATOR + name, conf);
        }
      }
    }
  }
/**
* Creates and sets a {@link ApplicationClassLoader} on the given
* configuration and as the thread context classloader, if
* {@link MRJobConfig#MAPREDUCE_JOB_CLASSLOADER} is set to true, and
* the APP_CLASSPATH environment variable is set.
* @param conf
* @throws IOException
*/
public static void setJobClassLoader(Configuration conf)
throws IOException {
setClassLoader(createJobClassLoader(conf), conf);
}
/**
* Creates a {@link ApplicationClassLoader} if
* {@link MRJobConfig#MAPREDUCE_JOB_CLASSLOADER} is set to true, and
* the APP_CLASSPATH environment variable is set.
* @param conf
* @return the created job classloader, or null if the job classloader is not
* enabled or the APP_CLASSPATH environment variable is not set
* @throws IOException
*/
public static ClassLoader createJobClassLoader(Configuration conf)
throws IOException {
ClassLoader jobClassLoader = null;
if (conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, false)) {
String appClasspath = System.getenv(Environment.APP_CLASSPATH.key());
if (appClasspath == null) {
LOG.warn("Not creating job classloader since APP_CLASSPATH is not set.");
} else {
LOG.info("Creating job classloader");
if (LOG.isDebugEnabled()) {
LOG.debug("APP_CLASSPATH=" + appClasspath);
}
String[] systemClasses = getSystemClasses(conf);
jobClassLoader = createJobClassLoader(appClasspath,
systemClasses);
}
}
return jobClassLoader;
}
/**
* Sets the provided classloader on the given configuration and as the thread
* context classloader if the classloader is not null.
* @param classLoader
* @param conf
*/
public static void setClassLoader(ClassLoader classLoader,
Configuration conf) {
if (classLoader != null) {
LOG.info("Setting classloader " + classLoader +
" on the configuration and as the thread context classloader");
conf.setClassLoader(classLoader);
Thread.currentThread().setContextClassLoader(classLoader);
}
}
  // Returns the configured class/package names that must always be loaded
  // from the parent (system) classloader when the isolating job classloader
  // is in use.
  @VisibleForTesting
  static String[] getSystemClasses(Configuration conf) {
    return conf.getTrimmedStrings(
        MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES);
  }
private static ClassLoader createJobClassLoader(final String appClasspath,
final String[] systemClasses) throws IOException {
try {
return AccessController.doPrivileged(
new PrivilegedExceptionAction<ClassLoader>() {
@Override
public ClassLoader run() throws MalformedURLException {
return new ApplicationClassLoader(appClasspath,
MRApps.class.getClassLoader(), Arrays.asList(systemClasses));
}
});
} catch (PrivilegedActionException e) {
Throwable t = e.getCause();
if (t instanceof MalformedURLException) {
throw (MalformedURLException) t;
}
throw new IOException(e);
}
}
  // Directory name appended to the per-user staging root.
  private static final String STAGING_CONSTANT = ".staging";
  /**
   * Returns the job staging directory for {@code user}:
   * {@code <yarn.app.mapreduce.am.staging-dir>/<user>/.staging}.
   */
  public static Path getStagingAreaDir(Configuration conf, String user) {
    return new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR,
        MRJobConfig.DEFAULT_MR_AM_STAGING_DIR)
        + Path.SEPARATOR + user + Path.SEPARATOR + STAGING_CONSTANT);
  }
public static String getJobFile(Configuration conf, String user,
org.apache.hadoop.mapreduce.JobID jobId) {
Path jobFile = new Path(MRApps.getStagingAreaDir(conf, user),
jobId.toString() + Path.SEPARATOR + MRJobConfig.JOB_CONF_FILE);
return jobFile.toString();
}
public static Path getEndJobCommitSuccessFile(Configuration conf, String user,
JobId jobId) {
Path endCommitFile = new Path(MRApps.getStagingAreaDir(conf, user),
jobId.toString() + Path.SEPARATOR + "COMMIT_SUCCESS");
return endCommitFile;
}
public static Path getEndJobCommitFailureFile(Configuration conf, String user,
JobId jobId) {
Path endCommitFile = new Path(MRApps.getStagingAreaDir(conf, user),
jobId.toString() + Path.SEPARATOR + "COMMIT_FAIL");
return endCommitFile;
}
public static Path getStartJobCommitFile(Configuration conf, String user,
JobId jobId) {
Path startCommitFile = new Path(MRApps.getStagingAreaDir(conf, user),
jobId.toString() + Path.SEPARATOR + "COMMIT_STARTED");
return startCommitFile;
}
  /**
   * Translates the job's distributed-cache archive and file entries into YARN
   * {@link LocalResource}s and adds them to {@code localResources}.
   * The builder is stateful: each setter group must be fully configured
   * before the corresponding createLocalResources call.
   *
   * @param conf job configuration holding the cache entries
   * @param localResources the map to populate with the local resources
   * @throws IOException if a cache entry cannot be converted
   */
  @SuppressWarnings("deprecation")
  public static void setupDistributedCache(Configuration conf,
      Map<String, LocalResource> localResources) throws IOException {
    LocalResourceBuilder lrb = new LocalResourceBuilder();
    lrb.setConf(conf);
    // Cache archives
    lrb.setType(LocalResourceType.ARCHIVE);
    lrb.setUris(JobContextImpl.getCacheArchives(conf));
    lrb.setTimestamps(JobContextImpl.getArchiveTimestamps(conf));
    lrb.setSizes(getFileSizes(conf, MRJobConfig.CACHE_ARCHIVES_SIZES));
    lrb.setVisibilities(DistributedCache.getArchiveVisibilities(conf));
    lrb.setSharedCacheUploadPolicies(
        Job.getArchiveSharedCacheUploadPolicies(conf));
    lrb.createLocalResources(localResources);
    // Cache files
    lrb.setType(LocalResourceType.FILE);
    lrb.setUris(JobContextImpl.getCacheFiles(conf));
    lrb.setTimestamps(JobContextImpl.getFileTimestamps(conf));
    lrb.setSizes(getFileSizes(conf, MRJobConfig.CACHE_FILES_SIZES));
    lrb.setVisibilities(DistributedCache.getFileVisibilities(conf));
    lrb.setSharedCacheUploadPolicies(
        Job.getFileSharedCacheUploadPolicies(conf));
    lrb.createLocalResources(localResources);
  }
/**
* Set up the DistributedCache related configs to make
* {@link JobContextImpl#getLocalCacheFiles(Configuration)}
* and
* {@link JobContextImpl#getLocalCacheArchives(Configuration)}
* working.
* @param conf
* @throws java.io.IOException
*/
public static void setupDistributedCacheLocal(Configuration conf)
throws IOException {
String localWorkDir = System.getenv("PWD");
// ^ ^ all symlinks are created in the current work-dir
// Update the configuration object with localized archives.
URI[] cacheArchives = JobContextImpl.getCacheArchives(conf);
if (cacheArchives != null) {
List<String> localArchives = new ArrayList<String>();
for (int i = 0; i < cacheArchives.length; ++i) {
URI u = cacheArchives[i];
Path p = new Path(u);
Path name =
new Path((null == u.getFragment()) ? p.getName()
: u.getFragment());
String linkName = name.toUri().getPath();
localArchives.add(new Path(localWorkDir, linkName).toUri().getPath());
}
if (!localArchives.isEmpty()) {
conf.set(MRJobConfig.CACHE_LOCALARCHIVES, StringUtils
.arrayToString(localArchives.toArray(new String[localArchives
.size()])));
}
}
// Update the configuration object with localized files.
URI[] cacheFiles = JobContextImpl.getCacheFiles(conf);
if (cacheFiles != null) {
List<String> localFiles = new ArrayList<String>();
for (int i = 0; i < cacheFiles.length; ++i) {
URI u = cacheFiles[i];
Path p = new Path(u);
Path name =
new Path((null == u.getFragment()) ? p.getName()
: u.getFragment());
String linkName = name.toUri().getPath();
localFiles.add(new Path(localWorkDir, linkName).toUri().getPath());
}
if (!localFiles.isEmpty()) {
conf.set(MRJobConfig.CACHE_LOCALFILES,
StringUtils.arrayToString(localFiles
.toArray(new String[localFiles.size()])));
}
}
}
// TODO - Move this to MR!
private static long[] getFileSizes(Configuration conf, String key) {
String[] strs = conf.getStrings(key);
if (strs == null) {
return null;
}
long[] result = new long[strs.length];
for(int i=0; i < strs.length; ++i) {
result[i] = Long.parseLong(strs[i]);
}
return result;
}
public static String getChildLogLevel(Configuration conf, boolean isMap) {
if (isMap) {
return conf.get(
MRJobConfig.MAP_LOG_LEVEL,
JobConf.DEFAULT_LOG_LEVEL
);
} else {
return conf.get(
MRJobConfig.REDUCE_LOG_LEVEL,
JobConf.DEFAULT_LOG_LEVEL
);
}
}
  /**
   * Add the JVM system properties necessary to configure
   * {@link ContainerLogAppender} or
   * {@link ContainerRollingLogAppender}.
   *
   * @param task for map/reduce, or null for app master
   * @param vargs the argument list to append to
   * @param conf configuration of MR job
   */
  public static void addLog4jSystemProperties(Task task,
      List<String> vargs, Configuration conf) {
    // Use a user-supplied log4j properties file when configured, otherwise
    // the bundled container-log4j.properties.
    String log4jPropertyFile =
        conf.get(MRJobConfig.MAPREDUCE_JOB_LOG4J_PROPERTIES_FILE, "");
    if (log4jPropertyFile.isEmpty()) {
      vargs.add("-Dlog4j.configuration=container-log4j.properties");
    } else {
      URI log4jURI = null;
      try {
        log4jURI = new URI(log4jPropertyFile);
      } catch (URISyntaxException e) {
        throw new IllegalArgumentException(e);
      }
      // Only the file name is used: the file is localized into the
      // container's working directory.
      Path log4jPath = new Path(log4jURI);
      vargs.add("-Dlog4j.configuration="+log4jPath.getName());
    }
    // Log size/level/backup count come from AM settings when task is null,
    // from task settings otherwise.
    long logSize;
    String logLevel;
    int numBackups;
    if (task == null) {
      logSize = conf.getLong(MRJobConfig.MR_AM_LOG_KB,
          MRJobConfig.DEFAULT_MR_AM_LOG_KB) << 10;
      logLevel = conf.get(
          MRJobConfig.MR_AM_LOG_LEVEL, MRJobConfig.DEFAULT_MR_AM_LOG_LEVEL);
      numBackups = conf.getInt(MRJobConfig.MR_AM_LOG_BACKUPS,
          MRJobConfig.DEFAULT_MR_AM_LOG_BACKUPS);
    } else {
      logSize = TaskLog.getTaskLogLimitBytes(conf);
      logLevel = getChildLogLevel(conf, task.isMapTask());
      numBackups = conf.getInt(MRJobConfig.TASK_LOG_BACKUPS,
          MRJobConfig.DEFAULT_TASK_LOG_BACKUPS);
    }
    vargs.add("-D" + YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR + "=" +
        ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    vargs.add(
        "-D" + YarnConfiguration.YARN_APP_CONTAINER_LOG_SIZE + "=" + logSize);
    if (logSize > 0L && numBackups > 0) {
      // log should be rolled
      vargs.add("-D" + YarnConfiguration.YARN_APP_CONTAINER_LOG_BACKUPS + "="
          + numBackups);
      // CRLA = ContainerRollingLogAppender; CLA = ContainerLogAppender.
      vargs.add("-Dhadoop.root.logger=" + logLevel + ",CRLA");
    } else {
      vargs.add("-Dhadoop.root.logger=" + logLevel + ",CLA");
    }
    vargs.add("-Dhadoop.root.logfile=" + TaskLog.LogName.SYSLOG);
    // Reducers may route shuffle logging to a dedicated (optionally rolling)
    // appender so shuffle noise doesn't swamp the task syslog.
    if ( task != null
        && !task.isMapTask()
        && conf.getBoolean(MRJobConfig.REDUCE_SEPARATE_SHUFFLE_LOG,
            MRJobConfig.DEFAULT_REDUCE_SEPARATE_SHUFFLE_LOG)) {
      final int numShuffleBackups = conf.getInt(MRJobConfig.SHUFFLE_LOG_BACKUPS,
          MRJobConfig.DEFAULT_SHUFFLE_LOG_BACKUPS);
      final long shuffleLogSize = conf.getLong(MRJobConfig.SHUFFLE_LOG_KB,
          MRJobConfig.DEFAULT_SHUFFLE_LOG_KB) << 10;
      final String shuffleLogger = logLevel
          + (shuffleLogSize > 0L && numShuffleBackups > 0
              ? ",shuffleCRLA"
              : ",shuffleCLA");
      vargs.add("-D" + MRJobConfig.MR_PREFIX
          + "shuffle.logger=" + shuffleLogger);
      vargs.add("-D" + MRJobConfig.MR_PREFIX
          + "shuffle.logfile=" + TaskLog.LogName.SYSLOG + ".shuffle");
      vargs.add("-D" + MRJobConfig.MR_PREFIX
          + "shuffle.log.filesize=" + shuffleLogSize);
      vargs.add("-D" + MRJobConfig.MR_PREFIX
          + "shuffle.log.backups=" + numShuffleBackups);
    }
  }
/**
* Return lines for system property keys and values per configuration.
*
* @return the formatted string for the system property lines or null if no
* properties are specified.
*/
public static String getSystemPropertiesToLog(Configuration conf) {
String key = conf.get(MRJobConfig.MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG,
MRJobConfig.DEFAULT_MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG);
if (key != null) {
key = key.trim(); // trim leading and trailing whitespace from the config
if (!key.isEmpty()) {
String[] props = key.split(",");
if (props.length > 0) {
StringBuilder sb = new StringBuilder();
sb.append("\n/************************************************************\n");
sb.append("[system properties]\n");
for (String prop: props) {
prop = prop.trim(); // trim leading and trailing whitespace
if (!prop.isEmpty()) {
sb.append(prop).append(": ").append(System.getProperty(prop)).append('\n');
}
}
sb.append("************************************************************/");
return sb.toString();
}
}
}
return null;
}
public static void setEnvFromInputString(Map<String, String> env,
String envString, Configuration conf) {
String classPathSeparator =
conf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,
MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM)
? ApplicationConstants.CLASS_PATH_SEPARATOR : File.pathSeparator;
Apps.setEnvFromInputString(env, envString, classPathSeparator);
}
public static void setEnvFromInputProperty(Map<String, String> env,
String propName, String defaultPropValue, Configuration conf) {
String classPathSeparator =
conf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,
MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM)
? ApplicationConstants.CLASS_PATH_SEPARATOR : File.pathSeparator;
Apps.setEnvFromInputProperty(env, propName, defaultPropValue, conf,
classPathSeparator);
}
@Public
@Unstable
public static void addToEnvironment(Map<String, String> environment,
String variable, String value, Configuration conf) {
String classPathSeparator =
conf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,
MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM)
? ApplicationConstants.CLASS_PATH_SEPARATOR : File.pathSeparator;
Apps.addToEnvironment(environment, variable, value, classPathSeparator);
}
public static String crossPlatformifyMREnv(Configuration conf, Environment env) {
boolean crossPlatform =
conf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,
MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM);
return crossPlatform ? env.$$() : env.$();
}
}
| TaskStateUI |
java | square__retrofit | retrofit/java-test/src/test/java/retrofit2/RequestFactoryTest.java | {
"start": 81453,
"end": 81855
} | class ____ {
@FormUrlEncoded //
@POST("/foo") //
Call<ResponseBody> method(
@Field("foo") String foo, @Field("ping") String ping, @Field("kit") String kit) {
return null;
}
}
Request request = buildRequest(Example.class, "bar", null, "kat");
assertBody(request.body(), "foo=bar&kit=kat");
}
@Test
public void formEncodedFieldList() {
| Example |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/JavaInstantGetSecondsGetNanoTest.java | {
"start": 7100,
"end": 7555
} | class ____ {
// BUG: Diagnostic contains: JavaInstantGetSecondsGetNano
private final int nanos = Instant.EPOCH.getNano();
}
""")
.doTest();
}
@Test
public void getNanoInInnerClassGetSecondsInMethod() {
compilationHelper
.addSourceLines(
"test/TestCase.java",
"""
package test;
import java.time.Instant;
public | TestCase |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/select/MySqlSelectTest_287.java | {
"start": 826,
"end": 1289
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "SELECT abs(x) FILTER (where y = 1) FROM (VALUES (1, 1)) t(x, y)";
SQLStatement stmt = SQLUtils
.parseSingleStatement(sql, DbType.mysql);
assertEquals("SELECT abs(x) FILTER (WHERE y = 1)\n" +
"FROM (\n" +
"\tVALUES (1, 1)\n" +
") AS t (x, y)", stmt.toString());
}
}
| MySqlSelectTest_287 |
java | apache__dubbo | dubbo-spring-boot-project/dubbo-spring-boot/src/test/java/org/apache/dubbo/spring/boot/context/event/DubboConfigBeanDefinitionConflictApplicationListenerTest.java | {
"start": 3219,
"end": 3329
} | class ____ {}
@ImportResource("classpath:/META-INF/spring/dubbo-context.xml")
static | PropertySourceConfig |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/support/MappedActionFilters.java | {
"start": 1850,
"end": 2754
} | class ____<Request extends ActionRequest, Response extends ActionResponse>
implements
ActionFilterChain<Request, Response> {
final List<MappedActionFilter> filters;
final ActionFilterChain<Request, Response> outerChain;
int index = 0;
MappedFilterChain(List<MappedActionFilter> filters, ActionFilterChain<Request, Response> outerChain) {
this.filters = filters;
this.outerChain = outerChain;
}
@Override
public void proceed(Task task, String action, Request request, ActionListener<Response> listener) {
if (index < filters.size()) {
var filter = filters.get(index++);
filter.apply(task, action, request, listener, this);
} else {
outerChain.proceed(task, action, request, listener);
}
}
}
}
| MappedFilterChain |
java | spring-projects__spring-security | web/src/test/java/org/springframework/security/web/firewall/StrictHttpFirewallTests.java | {
"start": 1219,
"end": 38566
} | class ____ {
public String[] unnormalizedPaths = { "/..", "/./path/", "/path/path/.", "/path/path//.", "./path/../path//.",
"./path", ".//path", ".", "//path", "//path/path", "//path//path", "/path//path" };
private StrictHttpFirewall firewall = new StrictHttpFirewall();
private MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
@Test
public void getFirewalledRequestWhenInvalidMethodThenThrowsRequestRejectedException() {
this.request.setMethod("INVALID");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
// blocks XST attacks
@Test
public void getFirewalledRequestWhenTraceMethodThenThrowsRequestRejectedException() {
this.request.setMethod(HttpMethod.TRACE.name());
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
// blocks XST attack if request is forwarded to a Microsoft IIS web server
public void getFirewalledRequestWhenTrackMethodThenThrowsRequestRejectedException() {
this.request.setMethod("TRACK");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
// HTTP methods are case sensitive
public void getFirewalledRequestWhenLowercaseGetThenThrowsRequestRejectedException() {
this.request.setMethod("get");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenAllowedThenNoException() {
List<String> allowedMethods = Arrays.asList("DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT");
for (String allowedMethod : allowedMethods) {
this.request = new MockHttpServletRequest(allowedMethod, "");
this.firewall.getFirewalledRequest(this.request);
}
}
@Test
public void getFirewalledRequestWhenInvalidMethodAndAnyMethodThenNoException() {
this.firewall.setUnsafeAllowAnyHttpMethod(true);
this.request.setMethod("INVALID");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenRequestURINotNormalizedThenThrowsRequestRejectedException() {
for (String path : this.unnormalizedPaths) {
this.request = new MockHttpServletRequest("GET", "");
this.request.setRequestURI(path);
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
}
@Test
public void getFirewalledRequestWhenContextPathNotNormalizedThenThrowsRequestRejectedException() {
for (String path : this.unnormalizedPaths) {
this.request = new MockHttpServletRequest("GET", "");
this.request.setContextPath(path);
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
}
@Test
public void getFirewalledRequestWhenServletPathNotNormalizedThenThrowsRequestRejectedException() {
for (String path : this.unnormalizedPaths) {
this.request = get().requestUri(path).build();
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
}
@Test
public void getFirewalledRequestWhenPathInfoNotNormalizedThenThrowsRequestRejectedException() {
for (String path : this.unnormalizedPaths) {
this.request = new MockHttpServletRequest("GET", "");
this.request.setPathInfo(path);
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
}
@Test
public void getFirewalledRequestWhenSemicolonInContextPathThenThrowsRequestRejectedException() {
this.request.setContextPath(";/context");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenSemicolonInServletPathThenThrowsRequestRejectedException() {
this.request.setServletPath("/spring;/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenSemicolonInPathInfoThenThrowsRequestRejectedException() {
this.request.setPathInfo("/path;/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenSemicolonInRequestUriThenThrowsRequestRejectedException() {
this.request.setRequestURI("/path;/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenEncodedSemicolonInContextPathThenThrowsRequestRejectedException() {
this.request.setContextPath("%3B/context");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenEncodedSemicolonInServletPathThenThrowsRequestRejectedException() {
this.request.setServletPath("/spring%3B/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenEncodedSemicolonInPathInfoThenThrowsRequestRejectedException() {
this.request.setPathInfo("/path%3B/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenEncodedSemicolonInRequestUriThenThrowsRequestRejectedException() {
this.request.setRequestURI("/path%3B/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenLowercaseEncodedSemicolonInContextPathThenThrowsRequestRejectedException() {
this.request.setContextPath("%3b/context");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenLowercaseEncodedSemicolonInServletPathThenThrowsRequestRejectedException() {
this.request.setServletPath("/spring%3b/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenLowercaseEncodedSemicolonInPathInfoThenThrowsRequestRejectedException() {
this.request.setPathInfo("/path%3b/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenLowercaseEncodedSemicolonInRequestUriThenThrowsRequestRejectedException() {
this.request.setRequestURI("/path%3b/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenSemicolonInContextPathAndAllowSemicolonThenNoException() {
this.firewall.setAllowSemicolon(true);
this.request.setContextPath(";/context");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenSemicolonInServletPathAndAllowSemicolonThenNoException() {
this.firewall.setAllowSemicolon(true);
this.request.setServletPath("/spring;/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenSemicolonInPathInfoAndAllowSemicolonThenNoException() {
this.firewall.setAllowSemicolon(true);
this.request.setPathInfo("/path;/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenSemicolonInRequestUriAndAllowSemicolonThenNoException() {
this.firewall.setAllowSemicolon(true);
this.request.setRequestURI("/path;/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenEncodedSemicolonInContextPathAndAllowSemicolonThenNoException() {
this.firewall.setAllowUrlEncodedPercent(true);
this.firewall.setAllowSemicolon(true);
this.request.setContextPath("%3B/context");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenEncodedSemicolonInServletPathAndAllowSemicolonThenNoException() {
this.firewall.setAllowUrlEncodedPercent(true);
this.firewall.setAllowSemicolon(true);
this.request.setServletPath("/spring%3B/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenEncodedSemicolonInPathInfoAndAllowSemicolonThenNoException() {
this.firewall.setAllowUrlEncodedPercent(true);
this.firewall.setAllowSemicolon(true);
this.request.setPathInfo("/path%3B/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenEncodedSemicolonInRequestUriAndAllowSemicolonThenNoException() {
this.firewall.setAllowSemicolon(true);
this.request.setRequestURI("/path%3B/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenLowercaseEncodedSemicolonInContextPathAndAllowSemicolonThenNoException() {
this.firewall.setAllowUrlEncodedPercent(true);
this.firewall.setAllowSemicolon(true);
this.request.setContextPath("%3b/context");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenLowercaseEncodedSemicolonInServletPathAndAllowSemicolonThenNoException() {
this.firewall.setAllowUrlEncodedPercent(true);
this.firewall.setAllowSemicolon(true);
this.request.setServletPath("/spring%3b/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenLowercaseEncodedSemicolonInPathInfoAndAllowSemicolonThenNoException() {
this.firewall.setAllowUrlEncodedPercent(true);
this.firewall.setAllowSemicolon(true);
this.request.setPathInfo("/path%3b/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenLowercaseEncodedSemicolonInRequestUriAndAllowSemicolonThenNoException() {
this.firewall.setAllowSemicolon(true);
this.request.setRequestURI("/path%3b/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenEncodedPeriodInThenThrowsRequestRejectedException() {
this.request.setRequestURI("/%2E/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenLowercaseEncodedPeriodInThenThrowsRequestRejectedException() {
this.request.setRequestURI("/%2e/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenAllowEncodedPeriodAndEncodedPeriodInThenNoException() {
this.firewall.setAllowUrlEncodedPeriod(true);
this.request.setRequestURI("/%2E/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenExceedsLowerboundAsciiThenException() {
this.request.setRequestURI("/\u0019");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenContainsLowerboundAsciiThenNoException() {
this.request.setRequestURI("/ ");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenContainsUpperboundAsciiThenNoException() {
this.request.setRequestURI("/~");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenJapaneseCharacterThenNoException() {
this.request.setServletPath("/\u3042");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenExceedsUpperboundAsciiThenException() {
this.request.setRequestURI("/\u007f");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenContainsNullThenException() {
this.request.setRequestURI("/\0");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenContainsEncodedNullThenException() {
this.request.setRequestURI("/something%00/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
// ---- Line feed (LF) in the URL: both percent-encoded and raw forms are rejected by
// default, guarding against HTTP response splitting / log injection. ----
@Test
public void getFirewalledRequestWhenContainsLowercaseEncodedLineFeedThenException() {
this.request.setRequestURI("/something%0a/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenContainsUppercaseEncodedLineFeedThenException() {
this.request.setRequestURI("/something%0A/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenContainsLineFeedThenException() {
this.request.setRequestURI("/something\n/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenServletPathContainsLineFeedThenException() {
this.request.setServletPath("/something\n/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
// ---- Carriage return (CR): same treatment as LF above. ----
@Test
public void getFirewalledRequestWhenContainsLowercaseEncodedCarriageReturnThenException() {
this.request.setRequestURI("/something%0d/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenContainsUppercaseEncodedCarriageReturnThenException() {
this.request.setRequestURI("/something%0D/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenContainsCarriageReturnThenException() {
this.request.setRequestURI("/something\r/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenServletPathContainsCarriageReturnThenException() {
this.request.setServletPath("/something\r/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
// ---- Unicode line separator (U+2028) and paragraph separator (U+2029) in the
// servlet path are rejected by default as well. ----
@Test
public void getFirewalledRequestWhenServletPathContainsLineSeparatorThenException() {
this.request.setServletPath("/something\u2028/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenServletPathContainsParagraphSeparatorThenException() {
this.request.setServletPath("/something\u2029/");
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
// ---- Opt-in flags: the encoded forms may be explicitly allowed. Note that a raw
// control character in the requestURI is still rejected even when the corresponding
// "allow URL-encoded" flag is set, because it indicates a decoded encoded sequence. ----
@Test
public void getFirewalledRequestWhenContainsLowercaseEncodedLineFeedAndAllowedThenNoException() {
this.firewall.setAllowUrlEncodedLineFeed(true);
this.request.setRequestURI("/something%0a/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenContainsUppercaseEncodedLineFeedAndAllowedThenNoException() {
this.firewall.setAllowUrlEncodedLineFeed(true);
this.request.setRequestURI("/something%0A/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenContainsLineFeedAndAllowedThenException() {
this.firewall.setAllowUrlEncodedLineFeed(true);
this.request.setRequestURI("/something\n/");
// Expected an error because the line feed is decoded in an encoded part of the
// URL
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenServletPathContainsLineFeedAndAllowedThenNoException() {
this.firewall.setAllowUrlEncodedLineFeed(true);
this.request.setServletPath("/something\n/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenContainsLowercaseEncodedCarriageReturnAndAllowedThenNoException() {
this.firewall.setAllowUrlEncodedCarriageReturn(true);
this.request.setRequestURI("/something%0d/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenContainsUppercaseEncodedCarriageReturnAndAllowedThenNoException() {
this.firewall.setAllowUrlEncodedCarriageReturn(true);
this.request.setRequestURI("/something%0D/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenContainsCarriageReturnAndAllowedThenNoException() {
this.firewall.setAllowUrlEncodedCarriageReturn(true);
this.request.setRequestURI("/something\r/");
// Expected an error because the carriage return is decoded in an encoded part of
// the URL
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenServletPathContainsCarriageReturnAndAllowedThenNoException() {
this.firewall.setAllowUrlEncodedCarriageReturn(true);
this.request.setServletPath("/something\r/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenServletPathContainsLineSeparatorAndAllowedThenNoException() {
this.firewall.setAllowUrlEncodedLineSeparator(true);
this.request.setServletPath("/something\u2028/");
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenServletPathContainsParagraphSeparatorAndAllowedThenNoException() {
this.firewall.setAllowUrlEncodedParagraphSeparator(true);
this.request.setServletPath("/something\u2029/");
this.firewall.getFirewalledRequest(this.request);
}
/**
 * On WebSphere 8.5 a URL like /context-root/a/b;%2f1/c can bypass a rule on /a/b/c
 * because the pathInfo is /a/b;/1/c which ends up being /a/b/1/c while Spring MVC
 * will strip the ; content from requestURI before the path is URL decoded.
 */
@Test
public void getFirewalledRequestWhenLowercaseEncodedPathThenException() {
this.request.setRequestURI("/context-root/a/b;%2f1/c");
this.request.setContextPath("/context-root");
this.request.setServletPath("");
this.request.setPathInfo("/a/b;/1/c"); // URL decoded requestURI
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
@Test
public void getFirewalledRequestWhenUppercaseEncodedPathThenException() {
this.request.setRequestURI("/context-root/a/b;%2F1/c");
this.request.setContextPath("/context-root");
this.request.setServletPath("");
this.request.setPathInfo("/a/b;/1/c"); // URL decoded requestURI
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
// With both the encoded-slash and semicolon opt-in flags set, the same URLs are accepted.
@Test
public void getFirewalledRequestWhenAllowUrlEncodedSlashAndLowercaseEncodedPathThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
this.firewall.setAllowSemicolon(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b;%2f1/c");
request.setContextPath("/context-root");
request.setServletPath("");
request.setPathInfo("/a/b;/1/c"); // URL decoded requestURI
this.firewall.getFirewalledRequest(request);
}
@Test
public void getFirewalledRequestWhenAllowUrlEncodedSlashAndUppercaseEncodedPathThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
this.firewall.setAllowSemicolon(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b;%2F1/c");
request.setContextPath("/context-root");
request.setServletPath("");
request.setPathInfo("/a/b;/1/c"); // URL decoded requestURI
this.firewall.getFirewalledRequest(request);
}
// An encoded double slash (%2f%2f in any case combination) needs its own opt-in
// in addition to allowUrlEncodedSlash.
@Test
public void getFirewalledRequestWhenAllowUrlLowerCaseEncodedDoubleSlashThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
this.firewall.setAllowUrlEncodedDoubleSlash(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b%2f%2fc");
request.setContextPath("/context-root");
request.setServletPath("");
request.setPathInfo("/a/b//c");
this.firewall.getFirewalledRequest(request);
}
@Test
public void getFirewalledRequestWhenAllowUrlUpperCaseEncodedDoubleSlashThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
this.firewall.setAllowUrlEncodedDoubleSlash(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b%2F%2Fc");
request.setContextPath("/context-root");
request.setServletPath("");
request.setPathInfo("/a/b//c");
this.firewall.getFirewalledRequest(request);
}
@Test
public void getFirewalledRequestWhenAllowUrlLowerCaseAndUpperCaseEncodedDoubleSlashThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
this.firewall.setAllowUrlEncodedDoubleSlash(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b%2f%2Fc");
request.setContextPath("/context-root");
request.setServletPath("");
request.setPathInfo("/a/b//c");
this.firewall.getFirewalledRequest(request);
}
@Test
public void getFirewalledRequestWhenAllowUrlUpperCaseAndLowerCaseEncodedDoubleSlashThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
this.firewall.setAllowUrlEncodedDoubleSlash(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b%2F%2fc");
request.setContextPath("/context-root");
request.setServletPath("");
request.setPathInfo("/a/b//c");
this.firewall.getFirewalledRequest(request);
}
// Deprecated "blacklist" accessors: removing a pattern from the mutable list
// disables that particular rejection.
@Test
public void getFirewalledRequestWhenRemoveFromUpperCaseEncodedUrlBlacklistThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b%2F%2Fc");
this.firewall.getEncodedUrlBlacklist().removeAll(Arrays.asList("%2F%2F"));
this.firewall.getFirewalledRequest(request);
}
@Test
public void getFirewalledRequestWhenRemoveFromLowerCaseEncodedUrlBlacklistThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b%2f%2fc");
this.firewall.getEncodedUrlBlacklist().removeAll(Arrays.asList("%2f%2f"));
this.firewall.getFirewalledRequest(request);
}
@Test
public void getFirewalledRequestWhenRemoveFromLowerCaseAndUpperCaseEncodedUrlBlacklistThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b%2f%2Fc");
this.firewall.getEncodedUrlBlacklist().removeAll(Arrays.asList("%2f%2F"));
this.firewall.getFirewalledRequest(request);
}
@Test
public void getFirewalledRequestWhenRemoveFromUpperCaseAndLowerCaseEncodedUrlBlacklistThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b%2F%2fc");
this.firewall.getEncodedUrlBlacklist().removeAll(Arrays.asList("%2F%2f"));
this.firewall.getFirewalledRequest(request);
}
@Test
public void getFirewalledRequestWhenRemoveFromDecodedUrlBlacklistThenNoException() {
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setPathInfo("/a/b//c");
this.firewall.getDecodedUrlBlacklist().removeAll(Arrays.asList("//"));
this.firewall.getFirewalledRequest(request);
}
// blocklist
// The "blocklist" accessors mirror the deprecated "blacklist" ones above and are
// exercised with the same case combinations.
@Test
public void getFirewalledRequestWhenRemoveFromUpperCaseEncodedUrlBlocklistThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b%2F%2Fc");
this.firewall.getEncodedUrlBlocklist().removeAll(Arrays.asList("%2F%2F"));
this.firewall.getFirewalledRequest(request);
}
@Test
public void getFirewalledRequestWhenRemoveFromLowerCaseEncodedUrlBlocklistThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b%2f%2fc");
this.firewall.getEncodedUrlBlocklist().removeAll(Arrays.asList("%2f%2f"));
this.firewall.getFirewalledRequest(request);
}
@Test
public void getFirewalledRequestWhenRemoveFromLowerCaseAndUpperCaseEncodedUrlBlocklistThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b%2f%2Fc");
this.firewall.getEncodedUrlBlocklist().removeAll(Arrays.asList("%2f%2F"));
this.firewall.getFirewalledRequest(request);
}
@Test
public void getFirewalledRequestWhenRemoveFromUpperCaseAndLowerCaseEncodedUrlBlocklistThenNoException() {
this.firewall.setAllowUrlEncodedSlash(true);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setRequestURI("/context-root/a/b%2F%2fc");
this.firewall.getEncodedUrlBlocklist().removeAll(Arrays.asList("%2F%2f"));
this.firewall.getFirewalledRequest(request);
}
@Test
public void getFirewalledRequestWhenRemoveFromDecodedUrlBlocklistThenNoException() {
MockHttpServletRequest request = new MockHttpServletRequest("GET", "");
request.setPathInfo("/a/b//c");
this.firewall.getDecodedUrlBlocklist().removeAll(Arrays.asList("//"));
this.firewall.getFirewalledRequest(request);
}
// Host header validation via the allowed-hostnames predicate.
@Test
public void getFirewalledRequestWhenTrustedDomainThenNoException() {
this.request.addHeader("Host", "example.org");
this.firewall.setAllowedHostnames((hostname) -> hostname.equals("example.org"));
this.firewall.getFirewalledRequest(this.request);
}
@Test
public void getFirewalledRequestWhenUntrustedDomainThenException() {
this.request.addHeader("Host", "example.org");
this.firewall.setAllowedHostnames((hostname) -> hostname.equals("myexample.org"));
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> this.firewall.getFirewalledRequest(this.request));
}
// ---- Header name/value validation. Note the rejection happens lazily: the wrapped
// request is obtained successfully and the exception is thrown on header access. ----
@Test
public void getFirewalledRequestGetHeaderWhenNotAllowedHeaderNameThenException() {
this.firewall.setAllowedHeaderNames((name) -> !name.equals("bad name"));
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(() -> request.getHeader("bad name"));
}
@Test
public void getFirewalledRequestWhenHeaderNameNotAllowedWithAugmentedHeaderNamesThenException() {
this.firewall
.setAllowedHeaderNames(StrictHttpFirewall.ALLOWED_HEADER_NAMES.and((name) -> !name.equals("bad name")));
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(() -> request.getHeader("bad name"));
}
@Test
public void getFirewalledRequestGetHeaderWhenNotAllowedHeaderValueThenException() {
this.request.addHeader("good name", "bad value");
this.firewall.setAllowedHeaderValues((value) -> !value.equals("bad value"));
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(() -> request.getHeader("good name"));
}
@Test
public void getFirewalledRequestWhenHeaderValueNotAllowedWithAugmentedHeaderValuesThenException() {
this.request.addHeader("good name", "bad value");
this.firewall.setAllowedHeaderValues(
StrictHttpFirewall.ALLOWED_HEADER_VALUES.and((value) -> !value.equals("bad value")));
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(() -> request.getHeader("good name"));
}
// Control characters (\0) and undefined Unicode code points (\uFFFE) are rejected
// by the default predicates in both header names and header values, across every
// header accessor (getHeader, getDateHeader, getIntHeader, getHeaders, getHeaderNames).
@Test
public void getFirewalledRequestGetDateHeaderWhenControlCharacterInHeaderNameThenException() {
this.request.addHeader("Bad\0Name", "some value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(() -> request.getDateHeader("Bad\0Name"));
}
@Test
public void getFirewalledRequestGetIntHeaderWhenControlCharacterInHeaderNameThenException() {
this.request.addHeader("Bad\0Name", "some value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(() -> request.getIntHeader("Bad\0Name"));
}
@Test
public void getFirewalledRequestGetHeaderWhenControlCharacterInHeaderNameThenException() {
this.request.addHeader("Bad\0Name", "some value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(() -> request.getHeader("Bad\0Name"));
}
@Test
public void getFirewalledRequestGetHeaderWhenUndefinedCharacterInHeaderNameThenException() {
this.request.addHeader("Bad\uFFFEName", "some value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(() -> request.getHeader("Bad\uFFFEName"));
}
@Test
public void getFirewalledRequestGetHeadersWhenControlCharacterInHeaderNameThenException() {
this.request.addHeader("Bad\0Name", "some value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(() -> request.getHeaders("Bad\0Name"));
}
@Test
public void getFirewalledRequestGetHeaderNamesWhenControlCharacterInHeaderNameThenException() {
this.request.addHeader("Bad\0Name", "some value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> request.getHeaderNames().nextElement());
}
@Test
public void getFirewalledRequestGetHeaderWhenControlCharacterInHeaderValueThenException() {
this.request.addHeader("Something", "bad\0value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(() -> request.getHeader("Something"));
}
@Test
public void getFirewalledRequestGetHeaderWhenHorizontalTabInHeaderValueThenNoException() {
// Horizontal tab is the one control character explicitly permitted in header values.
this.request.addHeader("Something", "tab\tvalue");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThat(request.getHeader("Something")).isEqualTo("tab\tvalue");
}
@Test
public void getFirewalledRequestGetHeaderWhenUndefinedCharacterInHeaderValueThenException() {
this.request.addHeader("Something", "bad\uFFFEvalue");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(() -> request.getHeader("Something"));
}
@Test
public void getFirewalledRequestGetHeadersWhenControlCharacterInHeaderValueThenException() {
this.request.addHeader("Something", "bad\0value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> request.getHeaders("Something").nextElement());
}
// ---- Parameter name/value validation, again rejected lazily on access. ----
@Test
public void getFirewalledRequestGetParameterWhenControlCharacterInParameterNameThenException() {
this.request.addParameter("Bad\0Name", "some value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(() -> request.getParameter("Bad\0Name"));
}
@Test
public void getFirewalledRequestGetParameterMapWhenControlCharacterInParameterNameThenException() {
this.request.addParameter("Bad\0Name", "some value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(request::getParameterMap);
}
@Test
public void getFirewalledRequestGetParameterNamesWhenControlCharacterInParameterNameThenException() {
this.request.addParameter("Bad\0Name", "some value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(request.getParameterNames()::nextElement);
}
@Test
public void getFirewalledRequestGetParameterNamesWhenUndefinedCharacterInParameterNameThenException() {
this.request.addParameter("Bad\uFFFEName", "some value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class).isThrownBy(request.getParameterNames()::nextElement);
}
@Test
public void getFirewalledRequestGetParameterValuesWhenNotAllowedInParameterValueThenException() {
this.firewall.setAllowedParameterValues((value) -> !value.equals("bad value"));
this.request.addParameter("Something", "bad value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> request.getParameterValues("Something"));
}
@Test
public void getFirewalledRequestWhenParameterValueNotAllowedWithAugmentedParameterValuesThenException() {
this.request.addParameter("Something", "bad value");
this.firewall.setAllowedParameterValues(
StrictHttpFirewall.ALLOWED_PARAMETER_VALUES.and((value) -> !value.equals("bad value")));
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> request.getParameterValues("Something"));
}
@Test
public void getFirewalledRequestGetParameterValuesWhenNotAllowedInParameterNameThenException() {
this.firewall.setAllowedParameterNames((value) -> !value.equals("bad name"));
this.request.addParameter("bad name", "good value");
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> request.getParameterValues("bad name"));
}
@Test
public void getFirewalledRequestWhenParameterNameNotAllowedWithAugmentedParameterNamesThenException() {
this.request.addParameter("bad name", "good value");
this.firewall.setAllowedParameterNames(
StrictHttpFirewall.ALLOWED_PARAMETER_NAMES.and((value) -> !value.equals("bad name")));
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(RequestRejectedException.class)
.isThrownBy(() -> request.getParameterValues("bad name"));
}
// gh-9598
// Null names must follow the HttpServletRequest contract (IllegalArgumentException,
// null, empty enumeration, or -1) rather than being rejected by the firewall.
@Test
public void getFirewalledRequestGetParameterWhenNameIsNullThenIllegalArgumentException() {
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> request.getParameter(null));
}
// gh-9598
@Test
public void getFirewalledRequestGetParameterValuesWhenNameIsNullThenIllegalArgumentException() {
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> request.getParameterValues(null));
}
// gh-9598
@Test
public void getFirewalledRequestGetHeaderWhenNameIsNullThenNull() {
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThat(request.getHeader(null)).isNull();
}
// gh-9598
@Test
public void getFirewalledRequestGetHeadersWhenNameIsNullThenEmptyEnumeration() {
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThat(request.getHeaders(null).hasMoreElements()).isFalse();
}
// gh-9598
@Test
public void getFirewalledRequestGetIntHeaderWhenNameIsNullThenNegativeOne() {
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThat(request.getIntHeader(null)).isEqualTo(-1);
}
@Test
public void getFirewalledRequestGetDateHeaderWhenNameIsNullThenNegativeOne() {
HttpServletRequest request = this.firewall.getFirewalledRequest(this.request);
assertThat(request.getDateHeader(null)).isEqualTo(-1);
}
}
| StrictHttpFirewallTests |
java | greenrobot__greendao | tests/DaoTest/src/androidTest/java/org/greenrobot/greendao/daotest/DeadlockPreventionTest.java | {
"start": 1372,
"end": 3922
} | class ____ extends AbstractDaoSessionTest<DaoMaster, DaoSession> {
// Signalled by the worker threads when the scenario completes; the watchdog loop
// below polls it with a timeout. (Worker thread classes are defined later in the
// class, outside this view.)
CountDownLatch done = new CountDownLatch(1);
private TestEntityDao dao;
public DeadlockPreventionTest() {
super(DaoMaster.class);
}
// Runs pretty long, only run manually
// Stress scenario: one reader and two writers hammer the same DAO concurrently.
// A watchdog loop detects a stall (no counter progress across consecutive 10s
// windows), dumps all three threads' stacks, and hard-exits after three stalls
// so a deadlock cannot hang the whole test run.
public void _testLoadAll() throws InterruptedException {
dao = daoSession.getTestEntityDao();
List<TestEntity> entities = new ArrayList<>();
for (int i = 0; i < 10000; i++) {
TestEntity entity = new TestEntity();
entity.setSimpleStringNotNull("Text" + i);
entities.add(entity);
}
dao.insertInTx(entities);
System.out.println("Entities inserted");
LoadThread loadThread = new LoadThread();
InsertThread insertThread = new InsertThread();
InsertBatchThread insertBatchThread = new InsertBatchThread();
loadThread.start();
insertThread.start();
insertBatchThread.start();
int lastCounterInsert = insertThread.counter;
int lastCounterInsertBatch = insertBatchThread.counter;
int noProgressCount = 0;
while (!done.await(10, TimeUnit.SECONDS)) {
// NOTE(review): counter fields are presumably updated by the worker threads;
// cross-thread visibility of these reads is assumed here — confirm they are
// volatile or otherwise safely published.
if (lastCounterInsert == insertThread.counter && lastCounterInsertBatch == insertBatchThread.counter) {
noProgressCount++;
System.err.println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
System.err.println("No progress #" + noProgressCount + ", dumping threads");
System.err.println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
dumpStacktrace("LOAD", loadThread);
dumpStacktrace("INSERT", insertThread);
dumpStacktrace("INSERT BATCH", insertBatchThread);
if (noProgressCount >= 3) {
// Test seems to be stuck, kill everything!
System.exit(1);
}
} else {
lastCounterInsert = insertThread.counter;
lastCounterInsertBatch = insertBatchThread.counter;
noProgressCount = 0;
}
}
loadThread.join();
insertThread.join();
insertBatchThread.join();
}
// Prints the given thread's current stack trace to stderr, labelled with name.
private void dumpStacktrace(String name, Thread thread) {
System.err.println("--- Thread dump of " + name + " ------------------------");
for (StackTraceElement element : thread.getStackTrace()) {
System.err.println(element);
}
}
private | DeadlockPreventionTest |
java | apache__camel | components/camel-mvel/src/test/java/org/apache/camel/language/mvel/MvelResourceTest.java | {
"start": 1051,
"end": 1763
} | class ____ extends CamelTestSupport {
@Test
public void testMvelResource() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("The result is 6");
template.sendBody("direct:start", 3);
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.setHeader("multiplier", constant(2))
.transform().mvel("resource:classpath:mymvel.txt")
.to("mock:result");
}
};
}
}
| MvelResourceTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/AbstractOneInputTransformationTranslator.java | {
"start": 1692,
"end": 1833
} | class ____ one input {@link Transformation transformations} that provides a
* function for configuring common graph properties.
*/
abstract | for |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestProportionRouterRpcFairnessPolicyController.java | {
"start": 1980,
"end": 8852
} | class ____ {
// Four namenodes across two nameservices used as the monitored set in every test.
private static String nameServices =
"ns1.nn1, ns1.nn2, ns2.nn1, ns2.nn2";
/**
 * Do not configure handlers for ns,
 * 0.1 of the total number of handlers will be used by default.
 */
@Test
public void testHandlerAllocationDefault() {
RouterRpcFairnessPolicyController routerRpcFairnessPolicyController
= getFairnessPolicyController(30);
// By default, each ns has 3 (30*0.1) handlers.
// So the first 3 requests were successful.
for (int i=0; i<3; i++) {
assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns1"));
assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns2"));
assertTrue(
routerRpcFairnessPolicyController.acquirePermit(CONCURRENT_NS));
}
// The 4th access failed because there was no available handler.
assertFalse(routerRpcFairnessPolicyController.acquirePermit("ns1"));
assertFalse(routerRpcFairnessPolicyController.acquirePermit("ns2"));
assertFalse(routerRpcFairnessPolicyController.acquirePermit(CONCURRENT_NS));
// Release a handler.
routerRpcFairnessPolicyController.releasePermit("ns1");
routerRpcFairnessPolicyController.releasePermit("ns2");
routerRpcFairnessPolicyController.releasePermit(CONCURRENT_NS);
// The next request is successful.
assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns1"));
assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns2"));
assertTrue(routerRpcFairnessPolicyController.acquirePermit(CONCURRENT_NS));
}
/**
 * The number of handlers is configured for ns.
 */
@Test
public void testHandlerAllocationPreconfigured() {
Configuration conf = createConf(40);
conf.setDouble(DFS_ROUTER_FAIR_HANDLER_PROPORTION_KEY_PREFIX + "ns1", 0.5);
RouterRpcFairnessPolicyController routerRpcFairnessPolicyController =
FederationUtil.newFairnessPolicyController(conf);
// ns1 should have 20 permits allocated
for (int i=0; i<20; i++) {
assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns1"));
}
// ns2 should have 4 permits.
// concurrent should have 4 permits.
for (int i=0; i<4; i++) {
assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns2"));
assertTrue(
routerRpcFairnessPolicyController.acquirePermit(CONCURRENT_NS));
}
assertFalse(routerRpcFairnessPolicyController.acquirePermit("ns1"));
assertFalse(routerRpcFairnessPolicyController.acquirePermit("ns2"));
assertFalse(routerRpcFairnessPolicyController.acquirePermit(CONCURRENT_NS));
}
/**
 * The handlers have not been obtained after a certain period of time.
 */
@Test
public void testAcquireTimeout() {
Configuration conf = createConf(40);
conf.setDouble(DFS_ROUTER_FAIR_HANDLER_PROPORTION_KEY_PREFIX + "ns1", 0.5);
conf.setTimeDuration(DFS_ROUTER_FAIRNESS_ACQUIRE_TIMEOUT, 100, TimeUnit.MILLISECONDS);
RouterRpcFairnessPolicyController routerRpcFairnessPolicyController =
FederationUtil.newFairnessPolicyController(conf);
// ns1 should have 20 permits allocated
for (int i = 0; i < 20; i++) {
assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns1"));
}
long acquireBeginTimeMs = Time.monotonicNow();
assertFalse(routerRpcFairnessPolicyController.acquirePermit("ns1"));
long acquireTimeMs = Time.monotonicNow() - acquireBeginTimeMs;
// There are some other operations, so acquireTimeMs >= 100ms.
assertTrue(acquireTimeMs >= 100);
}
/**
 * If 0 handlers are configured for ns, one handler will be provided for ns by default.
 */
@Test
public void testAllocationWithZeroProportion() {
Configuration conf = createConf(40);
conf.setDouble(DFS_ROUTER_FAIR_HANDLER_PROPORTION_KEY_PREFIX + "ns1", 0);
RouterRpcFairnessPolicyController routerRpcFairnessPolicyController =
FederationUtil.newFairnessPolicyController(conf);
// ns1 should have 1 permit allocated
assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns1"));
assertFalse(routerRpcFairnessPolicyController.acquirePermit("ns1"));
}
/**
 * The sum of handlers of all ns is supported to be
 * greater than the handlers available on the router, so that ns can share idle handlers.
 */
@Test
public void testAllocationHandlersGreaterThanCount() {
Configuration conf = createConf(40);
conf.setDouble(DFS_ROUTER_FAIR_HANDLER_PROPORTION_KEY_PREFIX + "ns1", 0.8);
conf.setDouble(DFS_ROUTER_FAIR_HANDLER_PROPORTION_KEY_PREFIX + "ns2", 0.8);
conf.setDouble(DFS_ROUTER_FAIR_HANDLER_PROPORTION_KEY_PREFIX + CONCURRENT_NS, 1);
RouterRpcFairnessPolicyController routerRpcFairnessPolicyController =
FederationUtil.newFairnessPolicyController(conf);
// ns1 32 permit allocated
// ns2 32 permit allocated
for (int i = 0; i < 32; i++) {
assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns1"));
assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns2"));
}
// CONCURRENT_NS 40 permit allocated
for (int i=0; i < 40; i++) {
assertTrue(routerRpcFairnessPolicyController.acquirePermit(CONCURRENT_NS));
}
}
/**
 * When accessing an unregistered ns, it can also be successful.
 * Therefore, to support cluster expansion with new ns,
 * you only need to add a mount to the router to access it without reconfiguring handlers.
 */
@Test
public void testTransparentExtension() {
Configuration conf = createConf(40);
RouterRpcFairnessPolicyController routerRpcFairnessPolicyController =
FederationUtil.newFairnessPolicyController(conf);
// Access unregistered ns.
// There are 4 (40*0.1) handlers by default.
for (int i=0; i<4; i++) {
assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns_unregistered"));
}
// The 5th access failed because there was no available handler.
assertFalse(routerRpcFairnessPolicyController.acquirePermit("ns_unregistered"));
// Release a handler, the next request is successful.
routerRpcFairnessPolicyController.releasePermit("ns_unregistered");
assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns_unregistered"));
}
// Builds a proportion-based controller with the given total handler count.
private RouterRpcFairnessPolicyController getFairnessPolicyController(
int handlers) {
return FederationUtil.newFairnessPolicyController(createConf(handlers));
}
// Base configuration: handler count, monitored namenodes, and the
// ProportionRouterRpcFairnessPolicyController as the policy class.
private Configuration createConf(int handlers) {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFS_ROUTER_HANDLER_COUNT_KEY, handlers);
conf.set(DFS_ROUTER_MONITOR_NAMENODE, nameServices);
conf.setClass(
RBFConfigKeys.DFS_ROUTER_FAIRNESS_POLICY_CONTROLLER_CLASS,
ProportionRouterRpcFairnessPolicyController.class,
RouterRpcFairnessPolicyController.class);
return conf;
}
}
| TestProportionRouterRpcFairnessPolicyController |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/resteasy/async/filters/AsyncRequestFilterResource.java | {
"start": 565,
"end": 3818
} | class ____ {
private static final Logger LOG = Logger.getLogger(AsyncRequestFilterResource.class);
// Echo endpoint hit after the request-filter chain; the Filter*/PreMatchFilter*
// headers are read by the filters (defined elsewhere), not used here.
// The commented-out suspension check is kept intentionally as reference.
@GET
public Response threeSyncRequestFilters(@Context ServerRequestContext ctx,
@HeaderParam("Filter1") @DefaultValue("") String filter1,
@HeaderParam("Filter2") @DefaultValue("") String filter2,
@HeaderParam("Filter3") @DefaultValue("") String filter3,
@HeaderParam("PreMatchFilter1") @DefaultValue("") String preMatchFilter1,
@HeaderParam("PreMatchFilter2") @DefaultValue("") String preMatchFilter2,
@HeaderParam("PreMatchFilter3") @DefaultValue("") String preMatchFilter3) {
// boolean async = isAsync(filter1)
// || isAsync(filter2)
// || isAsync(filter3)
// || isAsync(preMatchFilter1)
// || isAsync(preMatchFilter2)
// || isAsync(preMatchFilter3);
// if (async != ctx.isSuspended())
// return Response.serverError().entity("Request suspension is wrong").build();
return Response.ok("resource").build();
}
// Same as above but returning a plain String entity instead of a Response.
@Path("non-response")
@GET
public String threeSyncRequestFiltersNonResponse(@Context ServerRequestContext ctx,
@HeaderParam("Filter1") @DefaultValue("") String filter1,
@HeaderParam("Filter2") @DefaultValue("") String filter2,
@HeaderParam("Filter3") @DefaultValue("") String filter3,
@HeaderParam("PreMatchFilter1") @DefaultValue("") String preMatchFilter1,
@HeaderParam("PreMatchFilter2") @DefaultValue("") String preMatchFilter2,
@HeaderParam("PreMatchFilter3") @DefaultValue("") String preMatchFilter3) {
// boolean async = isAsync(filter1)
// || isAsync(filter2)
// || isAsync(filter3)
// || isAsync(preMatchFilter1)
// || isAsync(preMatchFilter2)
// || isAsync(preMatchFilter3);
// if (async != ctx.isSuspended())
// throw new WebApplicationException(Response.serverError().entity("Request suspension is wrong").build());
return "resource";
}
@Path("async")
@GET
public CompletionStage<Response> async() {
ExecutorService executor = Executors.newSingleThreadExecutor();
CompletableFuture<Response> resp = new CompletableFuture<>();
executor.submit(() -> {
try {
Thread.sleep(2000);
} catch (InterruptedException e) {
// TODO Auto-generated catch block
LOG.error("Error:", e);
}
resp.complete(Response.ok("resource").build());
});
return resp;
}
    // Minimal synchronous endpoint used to verify filter callbacks fire on a
    // plain (non-suspended) request.
    @Path("callback")
    @GET
    public String callback() {
        return "hello";
    }
    // Async counterpart of callback(): already-completed stage, so the
    // request completes without ever blocking.
    @Path("callback-async")
    @GET
    public CompletionStage<String> callbackAsync() {
        return CompletableFuture.completedFuture("hello");
    }
private boolean isAsync(String filter) {
return filter.equals("async-pass")
|| filter.equals("async-fail");
}
}
| AsyncRequestFilterResource |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/transaction/AbstractTransactionLifecycleTest.java | {
"start": 8411,
"end": 9369
} | class ____<T> {
    /**
     * Runs {@code action} against the given entity manager while recording
     * Hibernate lifecycle events, and captures the result together with the
     * session and the listener for later assertions.
     */
    public static <T> ValueAndExecutionMetadata<T> run(EntityManager entityManager, Function<EntityManager, T> action) {
        LifecycleListener listener = new LifecycleListener();
        // Attach the listener before the action so every event is observed.
        entityManager.unwrap(Session.class).addEventListeners(listener);
        T result = action.apply(entityManager);
        return new ValueAndExecutionMetadata<>(result, entityManager, listener);
    }
    // Result produced by the executed action.
    final T value;
    // Underlying Hibernate session implementation (never a wrapper).
    final SessionImplementor sessionImplementor;
    // Listener that recorded lifecycle events during execution.
    final LifecycleListener listener;
    private ValueAndExecutionMetadata(T value, EntityManager entityManager, LifecycleListener listener) {
        this.value = value;
        // Make sure we don't return a wrapper, but the actual implementation.
        this.sessionImplementor = entityManager.unwrap(SessionImplementor.class);
        this.listener = listener;
    }
}
private static | ValueAndExecutionMetadata |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/struct/TestBackRefsWithPolymorphic.java | {
"start": 780,
"end": 1134
} | interface ____
extends Property<PropertySheet>
{
@Override PropertySheet getValue();
void setValue(PropertySheet propertySheet);
}
@JsonDeserialize(as = AbstractProperty.class)
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS,
include = JsonTypeInfo.As.PROPERTY,
property = "@class")
| NestedPropertySheet |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/alternatives/priority/ComputedAlternativePriorityTest.java | {
"start": 570,
"end": 1881
} | class ____ {
    // Container configured with a computed-alternative-priority callback:
    // the Foo class bean gets priority 100, producer fields/methods get 10,
    // and everything else is left without a computed priority (null).
    @RegisterExtension
    ArcTestContainer testContainer = ArcTestContainer.builder().beanClasses(MyInterface.class, Foo.class, Producers.class)
            .alternativePriorities((target, stereotypes) -> {
                if (target.kind() == AnnotationTarget.Kind.CLASS) {
                    if (target.asClass().name().toString().equals(Foo.class.getName())) {
                        return 100;
                    }
                } else if (target.kind() == AnnotationTarget.Kind.FIELD || target.kind() == AnnotationTarget.Kind.METHOD) {
                    return 10;
                }
                return null;
            }).build();
    // Verifies that the computed priorities above make the expected
    // alternatives win: Foo for MyInterface, and the producer-supplied
    // String/Integer values for their respective types.
    @Test
    public void testComputedPriority() {
        InstanceHandle<MyInterface> myInterface = Arc.container().instance(MyInterface.class);
        assertTrue(myInterface.isAvailable());
        assertEquals(Foo.class.getSimpleName(), myInterface.get().ping());
        InstanceHandle<String> bravo = Arc.container().instance(String.class);
        assertTrue(bravo.isAvailable());
        assertEquals("bravo", bravo.get());
        InstanceHandle<Integer> charlie = Arc.container().instance(Integer.class);
        assertTrue(charlie.isAvailable());
        assertEquals(10, charlie.get());
    }
static | ComputedAlternativePriorityTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorREST.java | {
"start": 11383,
"end": 66267
} | class ____ extends BaseRouterWebServicesTest {
  // Number of mock sub-clusters registered with the state store.
  private final static int NUM_SUBCLUSTER = 4;
  // Expected HTTP status codes.
  private static final int BAD_REQUEST = 400;
  private static final int ACCEPTED = 202;
  private static final String TEST_USER = "test-user";
  private static final int OK = 200;
  // NOTE(review): duplicates TEST_USER and is mutable — confirm it is still
  // needed and consider making it final.
  private static String user = "test-user";
  private TestableFederationInterceptorREST interceptor;
  private MemoryFederationStateStore stateStore;
  private FederationStateStoreTestUtil stateStoreUtil;
  // SubClusterIds "0".."3" registered in setUp().
  private List<SubClusterId> subClusters;
  private static final String TEST_RENEWER = "test-renewer";
  /**
   * Wires up the interceptor under test against an in-memory federation
   * state store with {@link #NUM_SUBCLUSTER} registered sub-clusters, plus a
   * client-RM service with a running delegation-token secret manager.
   */
  @BeforeEach
  public void setUp() throws YarnException, IOException {
    super.setUpConfig();
    interceptor = new TestableFederationInterceptorREST();
    stateStore = new MemoryFederationStateStore();
    stateStore.init(this.getConf());
    FederationStateStoreFacade.getInstance(this.getConf()).reinitialize(stateStore,
        this.getConf());
    stateStoreUtil = new FederationStateStoreTestUtil(stateStore);
    interceptor.setConf(this.getConf());
    interceptor.init(TEST_USER);
    // Register sub-clusters "0".."NUM_SUBCLUSTER-1" in the state store.
    subClusters = new ArrayList<>();
    for (int i = 0; i < NUM_SUBCLUSTER; i++) {
      SubClusterId sc = SubClusterId.newInstance(Integer.toString(i));
      stateStoreUtil.registerSubCluster(sc);
      subClusters.add(sc);
    }
    // Build a client-RM service whose delegation-token secret manager uses
    // the intervals/lifetimes taken from the test configuration.
    RouterClientRMService routerClientRMService = new RouterClientRMService();
    routerClientRMService.initUserPipelineMap(getConf());
    long secretKeyInterval = this.getConf().getLong(
        RM_DELEGATION_KEY_UPDATE_INTERVAL_KEY, RM_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
    long tokenMaxLifetime = this.getConf().getLong(
        RM_DELEGATION_TOKEN_MAX_LIFETIME_KEY, RM_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
    long tokenRenewInterval = this.getConf().getLong(
        RM_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, RM_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
    long removeScanInterval = this.getConf().getTimeDuration(
        RM_DELEGATION_TOKEN_REMOVE_SCAN_INTERVAL_KEY,
        RM_DELEGATION_TOKEN_REMOVE_SCAN_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
    RouterDelegationTokenSecretManager tokenSecretManager = new RouterDelegationTokenSecretManager(
        secretKeyInterval, tokenMaxLifetime, tokenRenewInterval, removeScanInterval,
        this.getConf());
    tokenSecretManager.startThreads();
    routerClientRMService.setRouterDTSecretManager(tokenSecretManager);
    // Install a client interceptor pipeline for TEST_RENEWER so token
    // renew/cancel calls can be routed during the tests.
    TestableFederationClientInterceptor clientInterceptor =
        new TestableFederationClientInterceptor();
    clientInterceptor.setConf(this.getConf());
    clientInterceptor.init(TEST_RENEWER);
    clientInterceptor.setTokenSecretManager(tokenSecretManager);
    RequestInterceptorChainWrapper wrapper = new RequestInterceptorChainWrapper();
    wrapper.init(clientInterceptor);
    routerClientRMService.getUserPipelineMap().put(TEST_RENEWER, wrapper);
    interceptor.setRouterClientRMService(routerClientRMService);
    // Pre-create a REST interceptor per registered sub-cluster.
    for (SubClusterId subCluster : subClusters) {
      SubClusterInfo subClusterInfo = stateStoreUtil.querySubClusterInfo(subCluster);
      interceptor.getOrCreateInterceptorForSubCluster(
          subCluster, subClusterInfo.getRMWebServiceAddress());
    }
    interceptor.setupResourceManager();
  }
  /** Shuts down the interceptor before the base-class cleanup runs. */
  @AfterEach
  @Override
  public void tearDown() {
    interceptor.shutdown();
    super.tearDown();
  }
  /**
   * Builds the federation-enabled test configuration with a mock default
   * REST interceptor and a pass-through/testable client interceptor chain.
   */
  @Override
  protected YarnConfiguration createConfiguration() {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true);
    conf.set(YarnConfiguration.ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS,
        MockDefaultRequestInterceptorREST.class.getName());
    String mockPassThroughInterceptorClass =
        PassThroughRESTRequestInterceptor.class.getName();
    // Create a request interceptor pipeline for testing. The last one in the
    // chain is the federation interceptor that calls the mock resource manager.
    // The others in the chain will simply forward it to the next one in the
    // chain
    conf.set(YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE,
        mockPassThroughInterceptorClass + ","
            + TestableFederationInterceptorREST.class.getName());
    conf.set(YarnConfiguration.FEDERATION_POLICY_MANAGER,
        UniformBroadcastPolicyManager.class.getName());
    // Disable StateStoreFacade cache
    conf.setInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, 0);
    // Open AppsInfo Cache
    conf.setBoolean(YarnConfiguration.ROUTER_APPSINFO_ENABLED, true);
    conf.setInt(YarnConfiguration.ROUTER_APPSINFO_CACHED_COUNT, 10);
    return conf;
  }
/**
* This test validates the correctness of GetNewApplication. The return
* ApplicationId has to belong to one of the SubCluster in the cluster.
*/
@Test
public void testGetNewApplication() throws IOException, InterruptedException {
Response response = interceptor.createNewApplication(null);
assertNotNull(response);
NewApplication ci = (NewApplication) response.getEntity();
assertNotNull(ci);
ApplicationId appId = ApplicationId.fromString(ci.getApplicationId());
assertTrue(appId.getClusterTimestamp() < NUM_SUBCLUSTER);
assertTrue(appId.getClusterTimestamp() >= 0);
}
  /**
   * This test validates the correctness of SubmitApplication. The application
   * has to be submitted to one of the SubCluster in the cluster.
   */
  @Test
  public void testSubmitApplication()
      throws YarnException, IOException, InterruptedException {
    ApplicationId appId =
        ApplicationId.newInstance(Time.now(), 1);
    ApplicationSubmissionContextInfo context =
        new ApplicationSubmissionContextInfo();
    context.setApplicationId(appId.toString());
    Response response = interceptor.submitApplication(context, null);
    assertEquals(ACCEPTED, response.getStatus());
    SubClusterId ci = (SubClusterId) response.getEntity();
    assertNotNull(response);
    // The state store must map the app to the same home SubCluster that was
    // returned in the response entity.
    SubClusterId scIdResult = stateStoreUtil.queryApplicationHomeSC(appId);
    assertNotNull(scIdResult);
    assertTrue(subClusters.contains(scIdResult));
    assertEquals(ci, scIdResult);
  }
  /**
   * This test validates the correctness of SubmitApplication in case of
   * multiple submission. The first retry has to be submitted to the same
   * SubCluster of the first attempt.
   */
  @Test
  public void testSubmitApplicationMultipleSubmission()
      throws YarnException, IOException, InterruptedException {
    ApplicationId appId =
        ApplicationId.newInstance(Time.now(), 1);
    ApplicationSubmissionContextInfo context =
        new ApplicationSubmissionContextInfo();
    context.setApplicationId(appId.toString());
    // First attempt
    Response response = interceptor.submitApplication(context, null);
    assertNotNull(response);
    assertEquals(ACCEPTED, response.getStatus());
    SubClusterId scIdResult = stateStoreUtil.queryApplicationHomeSC(appId);
    assertNotNull(scIdResult);
    // First retry
    response = interceptor.submitApplication(context, null);
    assertNotNull(response);
    assertEquals(ACCEPTED, response.getStatus());
    // The retry must land on the same home SubCluster as the first attempt.
    SubClusterId scIdResult2 = stateStoreUtil.queryApplicationHomeSC(appId);
    assertNotNull(scIdResult2);
    assertEquals(scIdResult, scIdResult2);
  }
  /**
   * This test validates the correctness of SubmitApplication in case of empty
   * request.
   */
  @Test
  public void testSubmitApplicationEmptyRequest() throws IOException, InterruptedException {
    // ApplicationSubmissionContextInfo null
    Response response = interceptor.submitApplication(null, null);
    assertEquals(BAD_REQUEST, response.getStatus());
    // ApplicationSubmissionContextInfo empty
    response = interceptor
        .submitApplication(new ApplicationSubmissionContextInfo(), null);
    assertEquals(BAD_REQUEST, response.getStatus());
    // Context created but with no ApplicationId set.
    ApplicationSubmissionContextInfo context =
        new ApplicationSubmissionContextInfo();
    response = interceptor.submitApplication(context, null);
    assertEquals(BAD_REQUEST, response.getStatus());
  }
  /**
   * This test validates the correctness of SubmitApplication in case of
   * application in wrong format.
   */
  @Test
  public void testSubmitApplicationWrongFormat() throws IOException, InterruptedException {
    ApplicationSubmissionContextInfo context =
        new ApplicationSubmissionContextInfo();
    context.setApplicationId("Application_wrong_id");
    Response response = interceptor.submitApplication(context, null);
    assertEquals(BAD_REQUEST, response.getStatus());
  }
  /**
   * This test validates the correctness of ForceKillApplication in case the
   * application exists in the cluster.
   */
  @Test
  public void testForceKillApplication()
      throws YarnException, IOException, InterruptedException {
    ApplicationId appId =
        ApplicationId.newInstance(Time.now(), 1);
    ApplicationSubmissionContextInfo context =
        new ApplicationSubmissionContextInfo();
    context.setApplicationId(appId.toString());
    // Submit the application we are going to kill later
    Response response = interceptor.submitApplication(context, null);
    assertNotNull(response);
    assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId));
    AppState appState = new AppState("KILLED");
    Response responseKill =
        interceptor.updateAppState(appState, null, appId.toString());
    assertNotNull(responseKill);
  }
  /**
   * This test validates the correctness of ForceKillApplication in case of
   * application does not exist in StateStore.
   */
  @Test
  public void testForceKillApplicationNotExists()
      throws YarnException, IOException, InterruptedException {
    ApplicationId appId =
        ApplicationId.newInstance(Time.now(), 1);
    AppState appState = new AppState("KILLED");
    // No submission happened, so the kill must be rejected.
    Response response =
        interceptor.updateAppState(appState, null, appId.toString());
    assertEquals(BAD_REQUEST, response.getStatus());
  }
  /**
   * This test validates the correctness of ForceKillApplication in case of
   * application in wrong format.
   */
  @Test
  public void testForceKillApplicationWrongFormat()
      throws YarnException, IOException, InterruptedException {
    AppState appState = new AppState("KILLED");
    Response response =
        interceptor.updateAppState(appState, null, "Application_wrong_id");
    assertEquals(BAD_REQUEST, response.getStatus());
  }
  /**
   * This test validates the correctness of ForceKillApplication in case of
   * empty request.
   */
  @Test
  public void testForceKillApplicationEmptyRequest()
      throws YarnException, IOException, InterruptedException {
    ApplicationId appId =
        ApplicationId.newInstance(Time.now(), 1);
    ApplicationSubmissionContextInfo context =
        new ApplicationSubmissionContextInfo();
    context.setApplicationId(appId.toString());
    // Submit the application we are going to kill later
    interceptor.submitApplication(context, null);
    // A null AppState body must be rejected as a bad request.
    Response response =
        interceptor.updateAppState(null, null, appId.toString());
    assertEquals(BAD_REQUEST, response.getStatus());
  }
  /**
   * This test validates the correctness of GetApplicationReport in case the
   * application exists in the cluster.
   */
  @Test
  public void testGetApplicationReport()
      throws YarnException, IOException, InterruptedException {
    ApplicationId appId =
        ApplicationId.newInstance(Time.now(), 1);
    ApplicationSubmissionContextInfo context =
        new ApplicationSubmissionContextInfo();
    context.setApplicationId(appId.toString());
    // Submit the application we want the report later
    Response response = interceptor.submitApplication(context, null);
    assertNotNull(response);
    assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId));
    AppInfo responseGet = interceptor.getApp(null, appId.toString(), null);
    assertNotNull(responseGet);
  }
  /**
   * This test validates the correctness of GetApplicationReport in case the
   * application does not exist in StateStore.
   */
  @Test
  public void testGetApplicationNotExists() {
    ApplicationId appId =
        ApplicationId.newInstance(System.currentTimeMillis(), 1);
    // Unknown application: the interceptor returns null rather than failing.
    AppInfo response = interceptor.getApp(null, appId.toString(), null);
    assertNull(response);
  }
  /**
   * This test validates the correctness of GetApplicationReport in case of
   * application in wrong format.
   */
  @Test
  public void testGetApplicationWrongFormat() {
    IllegalArgumentException illegalArgumentException =
        assertThrows(IllegalArgumentException.class, () -> {
          interceptor.getApp(null, "Application_wrong_id", null);
        });
    assertTrue(illegalArgumentException.getMessage().contains(
        "Invalid ApplicationId prefix: Application_wrong_id."));
  }
  /**
   * This test validates the correctness of GetApplicationsReport in case each
   * subcluster provided one application.
   */
  @Test
  public void testGetApplicationsReport() {
    AppsInfo responseGet = interceptor.getApps(null, null, null, null, null,
        null, null, null, null, null, null, null, null, null, null);
    assertNotNull(responseGet);
    assertEquals(NUM_SUBCLUSTER, responseGet.getApps().size());
    // The merged operations is tested in TestRouterWebServiceUtil
  }
  /**
   * This test validates the correctness of GetNodes in case each subcluster
   * provided one node with the LastHealthUpdate set to the SubClusterId. The
   * expected result would be the NodeInfo from the last SubCluster that has
   * LastHealthUpdate equal to Num_SubCluster -1.
   */
  @Test
  public void testGetNode() {
    NodeInfo responseGet = interceptor.getNode("testGetNode");
    assertNotNull(responseGet);
    // The freshest report (highest LastHealthUpdate) must win the merge.
    assertEquals(NUM_SUBCLUSTER - 1, responseGet.getLastHealthUpdate());
  }
  /**
   * This test validates the correctness of GetNodes in case each subcluster
   * provided one node.
   */
  @Test
  public void testGetNodes() {
    NodesInfo responseGet = interceptor.getNodes(null);
    assertNotNull(responseGet);
    assertEquals(NUM_SUBCLUSTER, responseGet.getNodes().size());
    // The remove duplicate operations is tested in TestRouterWebServiceUtil
  }
  /**
   * This test validates the correctness of updateNodeResource().
   */
  @Test
  public void testUpdateNodeResource() {
    List<NodeInfo> nodes = interceptor.getNodes(null).getNodes();
    assertFalse(nodes.isEmpty());
    final String nodeId = nodes.get(0).getNodeId();
    // Request 2048 MB / 3 vcores with a 1000ms overcommit timeout.
    ResourceOptionInfo resourceOption = new ResourceOptionInfo(
        ResourceOption.newInstance(
            Resource.newInstance(2048, 3), 1000));
    ResourceInfo resource = interceptor.updateNodeResource(
        null, nodeId, resourceOption);
    assertNotNull(resource);
    assertEquals(2048, resource.getMemorySize());
    assertEquals(3, resource.getvCores());
  }
/**
* This test validates the correctness of getClusterMetricsInfo in case each
* SubCluster provided a ClusterMetricsInfo with appsSubmitted set to the
* SubClusterId. The expected result would be appSubmitted equals to the sum
* of SubClusterId. SubClusterId in this case is an integer.
*/
@Test
public void testGetClusterMetrics() {
ClusterMetricsInfo responseGet = interceptor.getClusterMetricsInfo();
assertNotNull(responseGet);
int expectedAppSubmitted = 0;
for (int i = 0; i < NUM_SUBCLUSTER; i++) {
expectedAppSubmitted += i;
}
assertEquals(expectedAppSubmitted, responseGet.getAppsSubmitted());
// The merge operations is tested in TestRouterWebServiceUtil
}
  /**
   * This test validates the correctness of GetApplicationState in case the
   * application exists in the cluster.
   */
  @Test
  public void testGetApplicationState()
      throws YarnException, IOException, InterruptedException {
    ApplicationId appId =
        ApplicationId.newInstance(Time.now(), 1);
    ApplicationSubmissionContextInfo context =
        new ApplicationSubmissionContextInfo();
    context.setApplicationId(appId.toString());
    // Submit the application we want the report later
    Response response = interceptor.submitApplication(context, null);
    assertNotNull(response);
    assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId));
    AppState responseGet = interceptor.getAppState(null, appId.toString());
    assertNotNull(responseGet);
    // The mock RM always reports RUNNING for a known application.
    assertEquals(MockDefaultRequestInterceptorREST.APP_STATE_RUNNING,
        responseGet.getState());
  }
  /**
   * This test validates the correctness of GetApplicationState in case the
   * application does not exist in StateStore.
   */
  @Test
  public void testGetApplicationStateNotExists() throws IOException {
    ApplicationId appId =
        ApplicationId.newInstance(Time.now(), 1);
    AppState response = interceptor.getAppState(null, appId.toString());
    assertNull(response);
  }
  /**
   * This test validates the correctness of GetApplicationState in case of
   * application in wrong format.
   */
  @Test
  public void testGetApplicationStateWrongFormat()
      throws IOException {
    AppState response = interceptor.getAppState(null, "Application_wrong_id");
    assertNull(response);
  }
  /**
   * This test validates the creation of new interceptor in case of a
   * RMSwitchover in a subCluster.
   */
  @Test
  public void testRMSwitchoverOfOneSC() throws Exception {
    SubClusterId subClusterId = SubClusterId.newInstance(Integer.toString(0));
    interceptor.getClusterMetricsInfo();
    // Before switchover, the interceptor points at the original RM webapp.
    assertEquals("http://1.2.3.4:4", interceptor
        .getInterceptorForSubCluster(subClusterId).getWebAppAddress());
    //Register the first subCluster with secondRM simulating RMSwitchover
    registerSubClusterWithSwitchoverRM(subClusterId);
    interceptor.getClusterMetricsInfo();
    // The next call must have re-created the interceptor for the new RM.
    assertEquals("http://5.6.7.8:8", interceptor
        .getInterceptorForSubCluster(subClusterId).getWebAppAddress());
  }
private void registerSubClusterWithSwitchoverRM(SubClusterId subClusterId)
throws YarnException {
String amRMAddress = "5.6.7.8:5";
String clientRMAddress = "5.6.7.8:6";
String rmAdminAddress = "5.6.7.8:7";
String webAppAddress = "5.6.7.8:8";
SubClusterInfo subClusterInfo = SubClusterInfo.newInstance(subClusterId,
amRMAddress, clientRMAddress, rmAdminAddress, webAppAddress,
SubClusterState.SC_RUNNING, new MonotonicClock().getTime(),
"capability");
stateStore.registerSubCluster(
SubClusterRegisterRequest.newInstance(subClusterInfo));
}
  /** Verifies that getContainers merges one container from each SubCluster. */
  @Test
  public void testGetContainers()
      throws YarnException, IOException, InterruptedException {
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    ApplicationSubmissionContextInfo context =
        new ApplicationSubmissionContextInfo();
    context.setApplicationId(appId.toString());
    // Submit the application we want the report later
    Response response = interceptor.submitApplication(context, null);
    assertNotNull(response);
    assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId));
    ApplicationAttemptId appAttempt = ApplicationAttemptId.newInstance(appId, 1);
    ContainersInfo responseGet = interceptor.getContainers(
        null, null, appId.toString(), appAttempt.toString());
    assertEquals(4, responseGet.getContainers().size());
  }
  /** A null/empty attempt id must be rejected with a clear error. */
  @Test
  public void testGetContainersNotExists() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    LambdaTestUtils.intercept(IllegalArgumentException.class,
        "Parameter error, the appAttemptId is empty or null.",
        () -> interceptor.getContainers(null, null, appId.toString(), null));
  }
  /** Malformed app or attempt ids must each raise IllegalArgumentException. */
  @Test
  public void testGetContainersWrongFormat() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    ApplicationAttemptId appAttempt = ApplicationAttemptId.newInstance(appId, 1);
    // Test Case 1: appId is wrong format, appAttemptId is accurate.
    LambdaTestUtils.intercept(IllegalArgumentException.class,
        "Invalid ApplicationId prefix: Application_wrong_id. " +
        "The valid ApplicationId should start with prefix application",
        () -> interceptor.getContainers(null, null, "Application_wrong_id", appAttempt.toString()));
    // Test Case2: appId is accurate, appAttemptId is wrong format.
    LambdaTestUtils.intercept(IllegalArgumentException.class,
        "Invalid AppAttemptId prefix: AppAttempt_wrong_id",
        () -> interceptor.getContainers(null, null, appId.toString(), "AppAttempt_wrong_id"));
  }
  /** Verifies node->labels merging across the mock SubClusters. */
  @Test
  public void testGetNodeToLabels() throws IOException {
    NodeToLabelsInfo info = interceptor.getNodeToLabels(null);
    HashMap<String, NodeLabelsInfo> map = info.getNodeToLabels();
    assertNotNull(map);
    assertEquals(2, map.size());
    NodeLabelsInfo node1Value = map.getOrDefault("node1", null);
    assertNotNull(node1Value);
    assertEquals(1, node1Value.getNodeLabelsName().size());
    assertEquals("CPU", node1Value.getNodeLabelsName().get(0));
    NodeLabelsInfo node2Value = map.getOrDefault("node2", null);
    assertNotNull(node2Value);
    assertEquals(1, node2Value.getNodeLabelsName().size());
    assertEquals("GPU", node2Value.getNodeLabelsName().get(0));
  }
  /**
   * Verifies labels->nodes merging: each of the 4 SubClusters contributes
   * identical per-label resources, so totals are 4x the per-cluster values.
   */
  @Test
  public void testGetLabelsToNodes() throws Exception {
    LabelsToNodesInfo labelsToNodesInfo = interceptor.getLabelsToNodes(null);
    Map<NodeLabelInfo, NodeIDsInfo> map = labelsToNodesInfo.getLabelsToNodes();
    assertNotNull(map);
    assertEquals(3, map.size());
    NodeLabel labelX = NodeLabel.newInstance("x", false);
    NodeLabelInfo nodeLabelInfoX = new NodeLabelInfo(labelX);
    NodeIDsInfo nodeIDsInfoX = map.get(nodeLabelInfoX);
    assertNotNull(nodeIDsInfoX);
    assertEquals(2, nodeIDsInfoX.getNodeIDs().size());
    Resource resourceX =
        nodeIDsInfoX.getPartitionInfo().getResourceAvailable().getResource();
    assertNotNull(resourceX);
    // 4 SubClusters x (10 vcores, 20GB) per cluster for label "x".
    assertEquals(4*10, resourceX.getVirtualCores());
    assertEquals(4*20*1024, resourceX.getMemorySize());
    NodeLabel labelY = NodeLabel.newInstance("y", false);
    NodeLabelInfo nodeLabelInfoY = new NodeLabelInfo(labelY);
    NodeIDsInfo nodeIDsInfoY = map.get(nodeLabelInfoY);
    assertNotNull(nodeIDsInfoY);
    assertEquals(2, nodeIDsInfoY.getNodeIDs().size());
    Resource resourceY =
        nodeIDsInfoY.getPartitionInfo().getResourceAvailable().getResource();
    assertNotNull(resourceY);
    // 4 SubClusters x (20 vcores, 40GB) per cluster for label "y".
    assertEquals(4*20, resourceY.getVirtualCores());
    assertEquals(4*40*1024, resourceY.getMemorySize());
  }
  /** Verifies the merged cluster-level node labels ("cpu" and "gpu"). */
  @Test
  public void testGetClusterNodeLabels() throws Exception {
    NodeLabelsInfo nodeLabelsInfo = interceptor.getClusterNodeLabels(null);
    assertNotNull(nodeLabelsInfo);
    assertEquals(2, nodeLabelsInfo.getNodeLabelsName().size());
    List<String> nodeLabelsName = nodeLabelsInfo.getNodeLabelsName();
    assertNotNull(nodeLabelsName);
    assertTrue(nodeLabelsName.contains("cpu"));
    assertTrue(nodeLabelsName.contains("gpu"));
    ArrayList<NodeLabelInfo> nodeLabelInfos = nodeLabelsInfo.getNodeLabelsInfo();
    assertNotNull(nodeLabelInfos);
    assertEquals(2, nodeLabelInfos.size());
    NodeLabelInfo cpuNodeLabelInfo = new NodeLabelInfo("cpu", false);
    assertTrue(nodeLabelInfos.contains(cpuNodeLabelInfo));
    NodeLabelInfo gpuNodeLabelInfo = new NodeLabelInfo("gpu", false);
    assertTrue(nodeLabelInfos.contains(gpuNodeLabelInfo));
  }
  /** Verifies per-node label lookup, including a node with no labels. */
  @Test
  public void testGetLabelsOnNode() throws Exception {
    NodeLabelsInfo nodeLabelsInfo = interceptor.getLabelsOnNode(null, "node1");
    assertNotNull(nodeLabelsInfo);
    assertEquals(2, nodeLabelsInfo.getNodeLabelsName().size());
    List<String> nodeLabelsName = nodeLabelsInfo.getNodeLabelsName();
    assertNotNull(nodeLabelsName);
    assertTrue(nodeLabelsName.contains("x"));
    assertTrue(nodeLabelsName.contains("y"));
    // null request
    interceptor.setAllowPartialResult(false);
    NodeLabelsInfo nodeLabelsInfo2 = interceptor.getLabelsOnNode(null, "node2");
    assertNotNull(nodeLabelsInfo2);
    assertEquals(0, nodeLabelsInfo2.getNodeLabelsName().size());
  }
  /** Verifies getContainer id validation and lookup of a known container. */
  @Test
  public void testGetContainer() throws Exception {
    // Build app / attempt / container ids for the lookup below.
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId appContainerId = ContainerId.newContainerId(appAttemptId, 1);
    String applicationId = appId.toString();
    String attemptId = appAttemptId.toString();
    String containerId = appContainerId.toString();
    // Submit application to multiSubCluster
    ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
    context.setApplicationId(applicationId);
    assertNotNull(interceptor.submitApplication(context, null));
    // Test Case1: Wrong ContainerId
    LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid ContainerId prefix: 0",
        () -> interceptor.getContainer(null, null, applicationId, attemptId, "0"));
    // Test Case2: Correct ContainerId
    ContainerInfo containerInfo = interceptor.getContainer(null, null, applicationId,
        attemptId, containerId);
    assertNotNull(containerInfo);
  }
  /** Verifies the list of attempts returned by the mock RMs for an app. */
  @Test
  public void testGetAppAttempts() throws IOException, InterruptedException {
    // Submit application to multiSubCluster
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
    context.setApplicationId(appId.toString());
    assertNotNull(interceptor.submitApplication(context, null));
    AppAttemptsInfo appAttemptsInfo = interceptor.getAppAttempts(null, appId.toString());
    assertNotNull(appAttemptsInfo);
    ArrayList<AppAttemptInfo> attemptLists = appAttemptsInfo.getAttempts();
    assertNotNull(appAttemptsInfo);
    assertEquals(2, attemptLists.size());
    AppAttemptInfo attemptInfo1 = attemptLists.get(0);
    assertNotNull(attemptInfo1);
    assertEquals(0, attemptInfo1.getAttemptId());
    assertEquals("AppAttemptId_0", attemptInfo1.getAppAttemptId());
    assertEquals("LogLink_0", attemptInfo1.getLogsLink());
    assertEquals(1659621705L, attemptInfo1.getFinishedTime());
    AppAttemptInfo attemptInfo2 = attemptLists.get(1);
    assertNotNull(attemptInfo2);
    assertEquals(0, attemptInfo2.getAttemptId());
    assertEquals("AppAttemptId_1", attemptInfo2.getAppAttemptId());
    assertEquals("LogLink_1", attemptInfo2.getLogsLink());
    assertEquals(1659621705L, attemptInfo2.getFinishedTime());
  }
  /** Verifies a single-attempt lookup against the mock RM's fixed values. */
  @Test
  public void testGetAppAttempt() throws IOException, InterruptedException {
    // Generate ApplicationId information
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
    context.setApplicationId(appId.toString());
    // Generate ApplicationAttemptId information
    assertNotNull(interceptor.submitApplication(context, null));
    ApplicationAttemptId expectAppAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    String appAttemptId = expectAppAttemptId.toString();
    org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo
        appAttemptInfo = interceptor.getAppAttempt(null, null, appId.toString(), appAttemptId);
    assertNotNull(appAttemptInfo);
    assertEquals(expectAppAttemptId.toString(), appAttemptInfo.getAppAttemptId());
    assertEquals("url", appAttemptInfo.getTrackingUrl());
    assertEquals("oUrl", appAttemptInfo.getOriginalTrackingUrl());
    assertEquals(124, appAttemptInfo.getRpcPort());
    assertEquals("host", appAttemptInfo.getHost());
  }
  /** Verifies a single timeout lookup (LIFETIME) for a submitted app. */
  @Test
  public void testGetAppTimeout() throws IOException, InterruptedException {
    // Generate ApplicationId information
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
    context.setApplicationId(appId.toString());
    // Generate ApplicationAttemptId information
    assertNotNull(interceptor.submitApplication(context, null));
    ApplicationTimeoutType appTimeoutType = ApplicationTimeoutType.LIFETIME;
    AppTimeoutInfo appTimeoutInfo =
        interceptor.getAppTimeout(null, appId.toString(), appTimeoutType.toString());
    assertNotNull(appTimeoutInfo);
    assertEquals(10, appTimeoutInfo.getRemainingTimeInSec());
    assertEquals("UNLIMITED", appTimeoutInfo.getExpireTime());
    assertEquals(appTimeoutType, appTimeoutInfo.getTimeoutType());
  }
  /** Verifies that all timeouts for an app are returned (only LIFETIME). */
  @Test
  public void testGetAppTimeouts() throws IOException, InterruptedException {
    // Generate ApplicationId information
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
    context.setApplicationId(appId.toString());
    // Generate ApplicationAttemptId information
    assertNotNull(interceptor.submitApplication(context, null));
    AppTimeoutsInfo appTimeoutsInfo = interceptor.getAppTimeouts(null, appId.toString());
    assertNotNull(appTimeoutsInfo);
    List<AppTimeoutInfo> timeouts = appTimeoutsInfo.getAppTimeouts();
    assertNotNull(timeouts);
    assertEquals(1, timeouts.size());
    AppTimeoutInfo resultAppTimeout = timeouts.get(0);
    assertNotNull(resultAppTimeout);
    assertEquals(10, resultAppTimeout.getRemainingTimeInSec());
    assertEquals("UNLIMITED", resultAppTimeout.getExpireTime());
    assertEquals(ApplicationTimeoutType.LIFETIME, resultAppTimeout.getTimeoutType());
  }
  /**
   * Verifies that updating the LIFETIME timeout of a submitted app echoes
   * back the requested expiry, type, and remaining time.
   */
  @Test
  public void testUpdateApplicationTimeout() throws IOException, InterruptedException,
      YarnException {
    // Generate ApplicationId information
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
    context.setApplicationId(appId.toString());
    // Generate ApplicationAttemptId information
    assertNotNull(interceptor.submitApplication(context, null));
    long newLifetime = 10L;
    // update 10L seconds more to timeout
    String timeout = Times.formatISO8601(Time.now() + newLifetime * 1000);
    AppTimeoutInfo paramAppTimeOut = new AppTimeoutInfo();
    paramAppTimeOut.setExpiryTime(timeout);
    // RemainingTime = Math.max((timeoutInMillis - System.currentTimeMillis()) / 1000, 0))
    paramAppTimeOut.setRemainingTime(newLifetime);
    paramAppTimeOut.setTimeoutType(ApplicationTimeoutType.LIFETIME);
    Response response =
        interceptor.updateApplicationTimeout(paramAppTimeOut, null, appId.toString());
    assertNotNull(response);
    AppTimeoutInfo entity = (AppTimeoutInfo) response.getEntity();
    assertNotNull(entity);
    assertEquals(paramAppTimeOut.getExpireTime(), entity.getExpireTime());
    assertEquals(paramAppTimeOut.getTimeoutType(), entity.getTimeoutType());
    assertEquals(paramAppTimeOut.getRemainingTimeInSec(), entity.getRemainingTimeInSec());
  }
/**
 * updateApplicationPriority should replace the priority set at submission
 * time (20) with the newly requested one (10) and return it in the entity.
 */
@Test
public void testUpdateApplicationPriority() throws IOException, InterruptedException,
    YarnException {
  // Submit application to multiSubCluster
  ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
  ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
  context.setApplicationId(appId.toString());
  context.setPriority(20);
  // Submit the application we are going to modify later
  assertNotNull(interceptor.submitApplication(context, null));
  int iPriority = 10;
  // Set Priority for application
  Response response = interceptor.updateApplicationPriority(
      new AppPriority(iPriority), null, appId.toString());
  assertNotNull(response);
  AppPriority entity = (AppPriority) response.getEntity();
  assertNotNull(entity);
  assertEquals(iPriority, entity.getPriority());
}
/**
 * getAppPriority should return the priority that was attached to the
 * application's submission context.
 */
@Test
public void testGetAppPriority() throws IOException, InterruptedException {
  // Submit application to multiSubCluster
  ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
  int priority = 40;
  ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
  context.setApplicationId(appId.toString());
  context.setPriority(priority);
  // Submit the application whose priority we will read back
  assertNotNull(interceptor.submitApplication(context, null));
  // Read the priority back through the interceptor
  AppPriority appPriority = interceptor.getAppPriority(null, appId.toString());
  assertNotNull(appPriority);
  assertEquals(priority, appPriority.getPriority());
}
/**
 * updateAppQueue should move the application from its submission queue to the
 * new queue; both the update response and a follow-up getAppQueue must report
 * the new queue name.
 */
@Test
public void testUpdateAppQueue() throws IOException, InterruptedException,
    YarnException {
  String oldQueue = "oldQueue";
  String newQueue = "newQueue";
  // Submit application to multiSubCluster
  ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
  ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
  context.setApplicationId(appId.toString());
  context.setQueue(oldQueue);
  // Submit the application
  assertNotNull(interceptor.submitApplication(context, null));
  // Set New Queue for application
  Response response = interceptor.updateAppQueue(new AppQueue(newQueue),
      null, appId.toString());
  assertNotNull(response);
  AppQueue appQueue = (AppQueue) response.getEntity();
  assertEquals(newQueue, appQueue.getQueue());
  // Get AppQueue by application
  AppQueue queue = interceptor.getAppQueue(null, appId.toString());
  assertNotNull(queue);
  assertEquals(newQueue, queue.getQueue());
}
/**
 * getAppQueue should return the queue name the application was submitted to.
 */
@Test
public void testGetAppQueue() throws IOException, InterruptedException {
  String queueName = "queueName";
  // Submit application to multiSubCluster
  ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
  ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
  context.setApplicationId(appId.toString());
  context.setQueue(queueName);
  assertNotNull(interceptor.submitApplication(context, null));
  // Get Queue by application
  AppQueue queue = interceptor.getAppQueue(null, appId.toString());
  assertNotNull(queue);
  assertEquals(queueName, queue.getQueue());
}
/**
 * A getApps call with all-null filters should populate the interceptor's
 * LRU apps-info cache with exactly one entry, keyed by the same (all-null)
 * parameter combination, whose value equals the returned AppsInfo.
 */
@Test
public void testGetAppsInfoCache() {
  AppsInfo responseGet = interceptor.getApps(
      null, null, null, null, null, null, null, null, null, null, null, null, null, null, null);
  assertNotNull(responseGet);
  // Cache keys are built from the full filter parameter list; all-null here
  // must match the all-null getApps call above.
  RouterAppInfoCacheKey cacheKey = RouterAppInfoCacheKey.newInstance(
      null, null, null, null, null, null, null, null, null, null, null, null, null, null, null);
  LRUCacheHashMap<RouterAppInfoCacheKey, AppsInfo> appsInfoCache =
      interceptor.getAppInfosCaches();
  assertNotNull(appsInfoCache);
  assertFalse(appsInfoCache.isEmpty());
  assertEquals(1, appsInfoCache.size());
  assertTrue(appsInfoCache.containsKey(cacheKey));
  AppsInfo cacheResult = appsInfoCache.get(cacheKey);
  assertNotNull(cacheResult);
  assertEquals(responseGet, cacheResult);
}
/**
 * getAppStatistics filtered by state=RUNNING and type=MapReduce should count
 * exactly the one submitted application after its state is forced to RUNNING
 * on the home sub-cluster's mock interceptor.
 */
@Test
public void testGetAppStatistics() throws IOException, InterruptedException, YarnException {
  // Submit application to multiSubCluster
  ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
  ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
  context.setApplicationId(appId.toString());
  context.setApplicationType("MapReduce");
  context.setQueue("queue");
  assertNotNull(interceptor.submitApplication(context, null));
  // Look up which sub-cluster the federation state store picked as home.
  GetApplicationHomeSubClusterRequest request =
      GetApplicationHomeSubClusterRequest.newInstance(appId);
  GetApplicationHomeSubClusterResponse response =
      stateStore.getApplicationHomeSubCluster(request);
  assertNotNull(response);
  ApplicationHomeSubCluster homeSubCluster = response.getApplicationHomeSubCluster();
  DefaultRequestInterceptorREST interceptorREST =
      interceptor.getInterceptorForSubCluster(homeSubCluster.getHomeSubCluster());
  MockDefaultRequestInterceptorREST mockInterceptorREST =
      (MockDefaultRequestInterceptorREST) interceptorREST;
  // Force the app into RUNNING so the statistics filter below matches it.
  mockInterceptorREST.updateApplicationState(YarnApplicationState.RUNNING,
      appId.toString());
  Set<String> stateQueries = new HashSet<>();
  stateQueries.add(YarnApplicationState.RUNNING.name());
  Set<String> typeQueries = new HashSet<>();
  typeQueries.add("MapReduce");
  ApplicationStatisticsInfo response2 =
      interceptor.getAppStatistics(null, stateQueries, typeQueries);
  assertNotNull(response2);
  assertFalse(response2.getStatItems().isEmpty());
  StatisticsItemInfo result = response2.getStatItems().get(0);
  assertEquals(1, result.getCount());
  assertEquals(YarnApplicationState.RUNNING, result.getState());
  assertEquals("MapReduce", result.getType());
}
/**
 * getAppActivities should return an activities report for the submitted
 * application; the allocation count of 10 presumably comes from the mock
 * interceptor's canned response -- confirm there if this changes.
 */
@Test
public void testGetAppActivities() throws IOException, InterruptedException {
  // Submit application to multiSubCluster
  ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
  ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
  context.setApplicationId(appId.toString());
  context.setApplicationType("MapReduce");
  context.setQueue("queue");
  assertNotNull(interceptor.submitApplication(context, null));
  Set<String> prioritiesSet = Collections.singleton("0");
  Set<String> allocationRequestIdsSet = Collections.singleton("0");
  AppActivitiesInfo appActivitiesInfo =
      interceptor.getAppActivities(null, appId.toString(), String.valueOf(Time.now()),
      prioritiesSet, allocationRequestIdsSet, null, "-1", null, false);
  assertNotNull(appActivitiesInfo);
  assertEquals(appId.toString(), appActivitiesInfo.getApplicationId());
  assertEquals(10, appActivitiesInfo.getAllocations().size());
}
/**
 * Submits a reservation and verifies that listReservation returns it with the
 * capability requested at submission time (4 containers of 1 vCore / 1024 MB,
 * per the defaults used by {@link #getReservationSubmissionRequestInfo}).
 */
@Test
public void testListReservation() throws Exception {
  // submitReservation
  ReservationId reservationId = ReservationId.newInstance(Time.now(), 1);
  submitReservation(reservationId);

  // Call the listReservation method
  String applyReservationId = reservationId.toString();
  Response listReservationResponse = interceptor.listReservation(
      QUEUE_DEDICATED_FULL, applyReservationId, -1, -1, false, null);
  assertNotNull(listReservationResponse);
  assertNotNull(listReservationResponse.getStatus());
  Status status = Status.fromStatusCode(listReservationResponse.getStatus());
  assertEquals(Status.OK, status);

  Object entity = listReservationResponse.getEntity();
  assertNotNull(entity);
  // NOTE: the original also had assertNotNull(entity instanceof ...), which
  // asserts a boxed Boolean is non-null and can never fail; assertTrue below
  // is the meaningful type check.
  assertTrue(entity instanceof ReservationListInfo);
  ReservationListInfo listInfo = (ReservationListInfo) entity;
  assertNotNull(listInfo);

  List<ReservationInfo> reservationInfoList = listInfo.getReservations();
  assertNotNull(reservationInfoList);
  assertEquals(1, reservationInfoList.size());
  ReservationInfo reservationInfo = reservationInfoList.get(0);
  assertNotNull(reservationInfo);
  assertEquals(applyReservationId, reservationInfo.getReservationId());

  ReservationDefinitionInfo definitionInfo = reservationInfo.getReservationDefinition();
  assertNotNull(definitionInfo);
  ReservationRequestsInfo reservationRequestsInfo = definitionInfo.getReservationRequests();
  assertNotNull(reservationRequestsInfo);
  ArrayList<ReservationRequestInfo> reservationRequestInfoList =
      reservationRequestsInfo.getReservationRequest();
  assertNotNull(reservationRequestInfoList);
  assertEquals(1, reservationRequestInfoList.size());
  ReservationRequestInfo reservationRequestInfo = reservationRequestInfoList.get(0);
  assertNotNull(reservationRequestInfo);
  assertEquals(4, reservationRequestInfo.getNumContainers());

  // Capability must match what was submitted: 1 vCore / 1024 MB.
  ResourceInfo resourceInfo = reservationRequestInfo.getCapability();
  assertNotNull(resourceInfo);
  int vCore = resourceInfo.getvCores();
  long memory = resourceInfo.getMemorySize();
  assertEquals(1, vCore);
  assertEquals(1024, memory);
}
/**
 * createNewReservation should hand back a NewReservation entity whose id
 * follows the "reservation..." naming scheme.
 */
@Test
public void testCreateNewReservation() throws Exception {
  Response response = interceptor.createNewReservation(null);
  assertNotNull(response);
  Object entity = response.getEntity();
  assertNotNull(entity);
  assertTrue(entity instanceof NewReservation);
  NewReservation newReservation = (NewReservation) entity;
  assertNotNull(newReservation);
  assertTrue(newReservation.getReservationId().contains("reservation"));
}
/**
 * submitReservation should be ACCEPTED, and a subsequent listReservation
 * must return exactly the reservation that was submitted.
 */
@Test
public void testSubmitReservation() throws Exception {
  // submit reservation
  ReservationId reservationId = ReservationId.newInstance(Time.now(), 2);
  Response response = submitReservation(reservationId);
  assertNotNull(response);
  assertEquals(Status.ACCEPTED.getStatusCode(), response.getStatus());

  String applyReservationId = reservationId.toString();
  Response reservationResponse = interceptor.listReservation(
      QUEUE_DEDICATED_FULL, applyReservationId, -1, -1, false, null);
  assertNotNull(reservationResponse);

  Object entity = reservationResponse.getEntity();
  assertNotNull(entity);
  // NOTE: the original also had assertNotNull(entity instanceof ...), which
  // asserts a boxed Boolean is non-null and can never fail; dropped in favor
  // of the real assertTrue type check below.
  assertTrue(entity instanceof ReservationListInfo);
  ReservationListInfo listInfo = (ReservationListInfo) entity;
  assertNotNull(listInfo);

  List<ReservationInfo> reservationInfos = listInfo.getReservations();
  assertNotNull(reservationInfos);
  assertEquals(1, reservationInfos.size());
  ReservationInfo reservationInfo = reservationInfos.get(0);
  assertNotNull(reservationInfo);
  // JUnit convention: expected value first, actual second.
  assertEquals(applyReservationId, reservationInfo.getReservationId());
}
/**
 * updateReservation should replace the originally submitted capability with
 * the updated one (6 containers of 2 vCores / 2048 MB) and the change must be
 * visible through a follow-up listReservation.
 */
@Test
public void testUpdateReservation() throws Exception {
  // submit reservation
  ReservationId reservationId = ReservationId.newInstance(Time.now(), 3);
  Response response = submitReservation(reservationId);
  assertNotNull(response);
  assertEquals(Status.ACCEPTED.getStatusCode(), response.getStatus());

  // update reservation: 6 containers, 2048 MB, 2 vCores
  ReservationSubmissionRequest resSubRequest =
      getReservationSubmissionRequest(reservationId, 6, 2048, 2);
  ReservationDefinition reservationDefinition = resSubRequest.getReservationDefinition();
  ReservationDefinitionInfo reservationDefinitionInfo =
      new ReservationDefinitionInfo(reservationDefinition);
  ReservationUpdateRequestInfo updateRequestInfo = new ReservationUpdateRequestInfo();
  updateRequestInfo.setReservationId(reservationId.toString());
  updateRequestInfo.setReservationDefinition(reservationDefinitionInfo);
  Response updateReservationResp = interceptor.updateReservation(updateRequestInfo, null);
  assertNotNull(updateReservationResp);
  assertEquals(Status.OK.getStatusCode(), updateReservationResp.getStatus());

  String applyReservationId = reservationId.toString();
  Response reservationResponse = interceptor.listReservation(
      QUEUE_DEDICATED_FULL, applyReservationId, -1, -1, false, null);
  assertNotNull(reservationResponse);

  Object entity = reservationResponse.getEntity();
  assertNotNull(entity);
  // NOTE: the original also had assertNotNull(entity instanceof ...), which
  // asserts a boxed Boolean is non-null and can never fail; dropped in favor
  // of the real assertTrue type check below.
  assertTrue(entity instanceof ReservationListInfo);
  ReservationListInfo listInfo = (ReservationListInfo) entity;
  assertNotNull(listInfo);

  List<ReservationInfo> reservationInfos = listInfo.getReservations();
  assertNotNull(reservationInfos);
  assertEquals(1, reservationInfos.size());
  ReservationInfo reservationInfo = reservationInfos.get(0);
  assertNotNull(reservationInfo);
  // JUnit convention: expected value first, actual second.
  assertEquals(applyReservationId, reservationInfo.getReservationId());

  ReservationDefinitionInfo resDefinitionInfo = reservationInfo.getReservationDefinition();
  assertNotNull(resDefinitionInfo);
  ReservationRequestsInfo reservationRequestsInfo = resDefinitionInfo.getReservationRequests();
  assertNotNull(reservationRequestsInfo);
  ArrayList<ReservationRequestInfo> reservationRequestInfoList =
      reservationRequestsInfo.getReservationRequest();
  assertNotNull(reservationRequestInfoList);
  assertEquals(1, reservationRequestInfoList.size());
  ReservationRequestInfo reservationRequestInfo = reservationRequestInfoList.get(0);
  assertNotNull(reservationRequestInfo);
  assertEquals(6, reservationRequestInfo.getNumContainers());

  // Capability must reflect the update, not the original submission.
  ResourceInfo resourceInfo = reservationRequestInfo.getCapability();
  assertNotNull(resourceInfo);
  int vCore = resourceInfo.getvCores();
  long memory = resourceInfo.getMemorySize();
  assertEquals(2, vCore);
  assertEquals(2048, memory);
}
/**
 * deleteReservation should remove the reservation so that a subsequent
 * listReservation for the same id fails with NotFoundException.
 */
@Test
public void testDeleteReservation() throws Exception {
  // submit reservation
  ReservationId reservationId = ReservationId.newInstance(Time.now(), 4);
  Response response = submitReservation(reservationId);
  assertNotNull(response);
  assertEquals(Status.ACCEPTED.getStatusCode(), response.getStatus());

  // Sanity check: the reservation is listable before deletion.
  String applyResId = reservationId.toString();
  Response reservationResponse = interceptor.listReservation(
      QUEUE_DEDICATED_FULL, applyResId, -1, -1, false, null);
  assertNotNull(reservationResponse);

  ReservationDeleteRequestInfo deleteRequestInfo =
      new ReservationDeleteRequestInfo();
  deleteRequestInfo.setReservationId(applyResId);
  Response delResponse = interceptor.deleteReservation(deleteRequestInfo, null);
  assertNotNull(delResponse);

  // After deletion, listing the same id must fail.
  NotFoundException exception = assertThrows(NotFoundException.class, () -> {
    interceptor.listReservation(QUEUE_DEDICATED_FULL, applyResId, -1, -1, false, null);
  });
  String stackTraceAsString = getStackTraceAsString(exception);
  assertTrue(stackTraceAsString.contains("reservationId with id: " +
      reservationId + " not found"));
}
/**
 * Builds a default submission request for {@code reservationId} and routes it
 * through the interceptor under test, returning the raw REST response.
 */
private Response submitReservation(ReservationId reservationId)
    throws IOException, InterruptedException {
  // null stands in for the HttpServletRequest parameter.
  return interceptor.submitReservation(
      getReservationSubmissionRequestInfo(reservationId), null);
}
/**
 * Builds the REST-layer submission DAO for {@code reservationId} using the
 * test defaults (NUM_CONTAINERS containers, 1024 MB, 1 vCore).
 *
 * @param reservationId the reservation to submit.
 * @return a populated ReservationSubmissionRequestInfo ready for submission.
 */
public static ReservationSubmissionRequestInfo getReservationSubmissionRequestInfo(
    ReservationId reservationId) {
  ReservationSubmissionRequest resSubRequest =
      getReservationSubmissionRequest(reservationId, NUM_CONTAINERS, 1024, 1);
  ReservationDefinition reservationDefinition = resSubRequest.getReservationDefinition();
  ReservationSubmissionRequestInfo resSubmissionRequestInfo =
      new ReservationSubmissionRequestInfo();
  resSubmissionRequestInfo.setQueue(resSubRequest.getQueue());
  resSubmissionRequestInfo.setReservationId(reservationId.toString());
  ReservationDefinitionInfo reservationDefinitionInfo =
      new ReservationDefinitionInfo(reservationDefinition);
  resSubmissionRequestInfo.setReservationDefinition(reservationDefinitionInfo);
  return resSubmissionRequestInfo;
}
/**
 * Builds a protocol-layer submission request for {@code reservationId} with
 * the given container count and per-container capability.
 *
 * @param reservationId the reservation to submit.
 * @param numContainers number of containers to reserve.
 * @param memory        memory (MB) per container.
 * @param vcore         vCores per container.
 * @return a ReservationSubmissionRequest spanning [now, now + 1.05 * DURATION].
 */
public static ReservationSubmissionRequest getReservationSubmissionRequest(
    ReservationId reservationId, int numContainers, int memory, int vcore) {
  // arrival time from which the resource(s) can be allocated.
  long arrival = Time.now();
  // Deadline by which the resource(s) must be allocated. The 1.05 factor
  // leaves a 5% slack window beyond one DURATION:
  //   deadline = arrival + DURATION + 0.05 * DURATION
  // (the original note's "deadline = arrival + 3000ms" omitted the full
  // DURATION term; 0.05 * DURATION = 3000 ms per that note).
  long deadline = (long) (arrival + 1.05 * DURATION);
  return createSimpleReservationRequest(
      reservationId, numContainers, arrival, deadline, DURATION, memory, vcore);
}
/**
 * Creates a reservation request with a single atomic ask on the dedicated
 * queue, using R_ALL interpretation (all asks must be satisfiable).
 *
 * @param reservationId the reservation id to attach.
 * @param numContainers number of containers in the single ask.
 * @param arrival       earliest allocation time (epoch millis).
 * @param deadline      latest allocation time (epoch millis).
 * @param duration      reservation duration in millis.
 * @param memory        memory (MB) per container.
 * @param vcore         vCores per container.
 * @return the assembled ReservationSubmissionRequest.
 */
public static ReservationSubmissionRequest createSimpleReservationRequest(
    ReservationId reservationId, int numContainers, long arrival,
    long deadline, long duration, int memory, int vcore) {
  // create a request with a single atomic ask
  ReservationRequest r = ReservationRequest.newInstance(
      Resource.newInstance(memory, vcore), numContainers, 1, duration);
  ReservationRequests reqs = ReservationRequests.newInstance(
      Collections.singletonList(r), ReservationRequestInterpreter.R_ALL);
  ReservationDefinition rDef = ReservationDefinition.newInstance(
      arrival, deadline, reqs, "testClientRMService#reservation", "0", Priority.UNDEFINED);
  return ReservationSubmissionRequest.newInstance(rDef, QUEUE_DEDICATED_FULL, reservationId);
}
/**
 * Verifies that the scheme prefix prepended to a sub-cluster's heartbeat web
 * address follows the configured HTTP policy: http:// by default, https://
 * once HTTPS_ONLY is set.
 */
@Test
public void testWebAddressWithScheme() {
  // The style of the web address reported by the subCluster in the heartbeat is 0.0.0.0:8000
  // We design the following 2 test cases:
  String webAppAddress = "0.0.0.0:8000";

  // 1. With the default (HTTP) policy we should get the following link:
  // http://0.0.0.0:8000
  String expectedHttpWebAddress = "http://0.0.0.0:8000";
  String webAppAddressWithScheme =
      WebAppUtils.getHttpSchemePrefix(this.getConf()) + webAppAddress;
  assertEquals(expectedHttpWebAddress, webAppAddressWithScheme);

  // 2. After enabling HTTPS_ONLY we should get the following link:
  // https://0.0.0.0:8000
  // NOTE(review): this mutates the shared test Configuration and is not
  // reverted afterwards -- confirm later tests don't depend on the HTTP policy.
  Configuration configuration = this.getConf();
  configuration.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
      HttpConfig.Policy.HTTPS_ONLY.name());
  String expectedHttpsWebAddress = "https://0.0.0.0:8000";
  String webAppAddressWithScheme2 =
      WebAppUtils.getHttpSchemePrefix(this.getConf()) + webAppAddress;
  assertEquals(expectedHttpsWebAddress, webAppAddressWithScheme2);
}
/**
 * Exercises checkUserAccessToQueue end to end:
 * non-admin callers cannot query other users, unknown ACL types are rejected,
 * and admin/yarn/other users see exactly the ACLs granted to them.
 */
@Test
public void testCheckUserAccessToQueue() throws Exception {
  // Require every sub-cluster to answer for the duration of this test.
  interceptor.setAllowPartialResult(false);

  // Case 1: Only queue admin user can access other user's information
  HttpServletRequest mockHsr = mockHttpServletRequestByUserName("non-admin");
  String errorMsg1 = "User=non-admin doesn't haven access to queue=queue " +
      "so it cannot check ACLs for other users.";
  RuntimeException exception = assertThrows(RuntimeException.class, () -> {
    interceptor.checkUserAccessToQueue("queue", "jack",
        QueueACL.SUBMIT_APPLICATIONS.name(), mockHsr);
  });
  String stackTraceAsString = getStackTraceAsString(exception);
  assertTrue(stackTraceAsString.contains(errorMsg1));

  // Case 2: request an unknown ACL causes BAD_REQUEST
  HttpServletRequest mockHsr1 = mockHttpServletRequestByUserName("admin");
  String errorMsg2 = "Specified queueAclType=XYZ_ACL is not a valid type, " +
      "valid queue acl types={SUBMIT_APPLICATIONS/ADMINISTER_QUEUE}";
  RuntimeException exception2 = assertThrows(RuntimeException.class, () -> {
    interceptor.checkUserAccessToQueue("queue", "jack",
        "XYZ_ACL", mockHsr1);
  });
  String stackTraceAsString2 = getStackTraceAsString(exception2);
  assertTrue(stackTraceAsString2.contains(errorMsg2));

  // We design a test, admin user has ADMINISTER_QUEUE, SUBMIT_APPLICATIONS permissions,
  // yarn user has SUBMIT_APPLICATIONS permissions, other users have no permissions

  // Case 3: get FORBIDDEN for rejected ACL
  checkUserAccessToQueueFailed("queue", "jack", QueueACL.SUBMIT_APPLICATIONS, "admin");
  checkUserAccessToQueueFailed("queue", "jack", QueueACL.ADMINISTER_QUEUE, "admin");

  // Case 4: get OK for listed ACLs
  checkUserAccessToQueueSuccess("queue", "admin", QueueACL.ADMINISTER_QUEUE, "admin");
  checkUserAccessToQueueSuccess("queue", "admin", QueueACL.SUBMIT_APPLICATIONS, "admin");

  // Case 5: get OK only for SUBMIT_APP acl for "yarn" user
  checkUserAccessToQueueFailed("queue", "yarn", QueueACL.ADMINISTER_QUEUE, "admin");
  checkUserAccessToQueueSuccess("queue", "yarn", QueueACL.SUBMIT_APPLICATIONS, "admin");

  // Restore the default partial-result behavior for later tests.
  interceptor.setAllowPartialResult(true);
}
/**
 * Asserts that {@code userName} is allowed {@code queueACL} on {@code queue}
 * in every sub-cluster, when queried as {@code mockUser}.
 * The expected list size of 4 presumably equals the number of sub-clusters
 * configured in this test -- confirm in the test setup if it changes.
 */
private void checkUserAccessToQueueSuccess(String queue, String userName,
    QueueACL queueACL, String mockUser) throws AuthorizationException {
  HttpServletRequest mockHsr = mockHttpServletRequestByUserName(mockUser);
  RMQueueAclInfo aclInfo =
      interceptor.checkUserAccessToQueue(queue, userName, queueACL.name(), mockHsr);
  assertNotNull(aclInfo);
  assertTrue(aclInfo instanceof FederationRMQueueAclInfo);
  FederationRMQueueAclInfo fedAclInfo = (FederationRMQueueAclInfo) aclInfo;
  List<RMQueueAclInfo> aclInfos = fedAclInfo.getList();
  assertNotNull(aclInfos);
  assertEquals(4, aclInfos.size());
  for (RMQueueAclInfo rMQueueAclInfo : aclInfos) {
    assertTrue(rMQueueAclInfo.isAllowed());
  }
}
/**
 * Asserts that {@code userName} is denied {@code queueACL} on {@code queue}
 * in every sub-cluster (with the expected diagnostics message), when queried
 * as {@code mockUser}. The expected list size of 4 presumably equals the
 * number of sub-clusters configured in this test -- confirm in the setup.
 */
private void checkUserAccessToQueueFailed(String queue, String userName,
    QueueACL queueACL, String mockUser) throws AuthorizationException {
  HttpServletRequest mockHsr = mockHttpServletRequestByUserName(mockUser);
  RMQueueAclInfo aclInfo =
      interceptor.checkUserAccessToQueue(queue, userName, queueACL.name(), mockHsr);
  assertNotNull(aclInfo);
  assertTrue(aclInfo instanceof FederationRMQueueAclInfo);
  FederationRMQueueAclInfo fedAclInfo = (FederationRMQueueAclInfo) aclInfo;
  List<RMQueueAclInfo> aclInfos = fedAclInfo.getList();
  assertNotNull(aclInfos);
  assertEquals(4, aclInfos.size());
  for (RMQueueAclInfo rMQueueAclInfo : aclInfos) {
    assertFalse(rMQueueAclInfo.isAllowed());
    // Each per-sub-cluster entry must explain why access was denied.
    String expectDiagnostics = "User=" + userName +
        " doesn't have access to queue=queue with acl-type=" + queueACL.name();
    assertEquals(expectDiagnostics, rMQueueAclInfo.getDiagnostics());
  }
}
/**
 * Creates a mock HttpServletRequest whose remote user and user principal both
 * report the given {@code username}.
 */
private HttpServletRequest mockHttpServletRequestByUserName(String username) {
  Principal userPrincipal = mock(Principal.class);
  when(userPrincipal.getName()).thenReturn(username);
  HttpServletRequest request = mock(HttpServletRequest.class);
  when(request.getRemoteUser()).thenReturn(username);
  when(request.getUserPrincipal()).thenReturn(userPrincipal);
  return request;
}
/**
 * getOrCreateInterceptorForSubCluster should lazily build a per-sub-cluster
 * interceptor with a usable client and a scheme-prefixed web app address.
 */
@Test
public void testCheckFederationInterceptorRESTClient() {
  SubClusterId subClusterId = SubClusterId.newInstance("SC-1");
  String webAppSocket = "SC-1:WebAddress";
  // Default (HTTP) policy, so the expected address gets an http:// prefix.
  String webAppAddress = "http://" + webAppSocket;

  Configuration configuration = new Configuration();
  FederationInterceptorREST rest = new FederationInterceptorREST();
  rest.setConf(configuration);
  rest.init("router");

  DefaultRequestInterceptorREST interceptorREST =
      rest.getOrCreateInterceptorForSubCluster(subClusterId, webAppSocket);
  assertNotNull(interceptorREST);
  assertNotNull(interceptorREST.getClient());
  assertEquals(webAppAddress, interceptorREST.getWebAppAddress());
}
@Test
public void testInvokeConcurrent() throws IOException, YarnException {
// We design such a test case, we call the interceptor's getNodes interface,
// this | TestFederationInterceptorREST |
java | google__dagger | javatests/dagger/functional/builder/BuildMethodCovariantReturnTest.java | {
"start": 943,
"end": 1172
} | interface ____ {
Object build();
}
}
@Test
public void componentTest() {
Object component = DaggerBuildMethodCovariantReturnTest_C.builder().build();
assertThat(component).isInstanceOf(C.class);
}
}
| Builder |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java | {
"start": 1643,
"end": 1714
} | class ____ allocating an unassigned shard to a node
*/
public abstract | for |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/node/DecimalNode.java | {
"start": 359,
"end": 7363
} | class ____
extends NumericFPNode
{
private static final long serialVersionUID = 3L;
public static final DecimalNode ZERO = new DecimalNode(BigDecimal.ZERO);
final protected BigDecimal _value;
/*
/**********************************************************************
/* Construction
/**********************************************************************
*/
public DecimalNode(BigDecimal v) {
// 01-Mar-2024, tatu: [databind#4381] No null-valued JsonNodes
_value = Objects.requireNonNull(v);
}
public static DecimalNode valueOf(BigDecimal d) { return new DecimalNode(d); }
/*
/**********************************************************************
/* Overridden JsonNode methods, simple properties
/**********************************************************************
*/
@Override
public JsonParser.NumberType numberType() { return JsonParser.NumberType.BIG_DECIMAL; }
@Override
public boolean isBigDecimal() { return true; }
@Override
public boolean isNaN() { return false; }
/*
/**********************************************************************
/* Overridden JsonNode methods, scalar access, non-numeric
/**********************************************************************
*/
@Override
public String _asString() {
return _value.toString();
}
/*
/**********************************************************************
/* Overridden JsonNode methods, scalar access, numeric
/**********************************************************************
*/
@Override
public Number numberValue() { return _value; }
@Override
public float floatValue() {
float f = _value.floatValue();
if (Float.isFinite(f)) {
return f;
}
return _reportFloatCoercionRangeFail("floatValue()");
}
@Override
public float floatValue(float defaultValue) {
float f = _value.floatValue();
if (Float.isFinite(f)) {
return f;
}
return defaultValue;
}
@Override
public Optional<Float> floatValueOpt() {
float f = _value.floatValue();
if (Float.isFinite(f)) {
return Optional.of(f);
}
return Optional.empty();
}
@Override
public float asFloat() {
float f = _value.floatValue();
if (Float.isFinite(f)) {
return f;
}
return _reportFloatCoercionRangeFail("asFloat()");
}
@Override
public float asFloat(float defaultValue) {
float f = _value.floatValue();
if (Float.isFinite(f)) {
return f;
}
return defaultValue;
}
@Override
public Optional<Float> asFloatOpt() {
float f = _value.floatValue();
if (Float.isFinite(f)) {
return Optional.of(f);
}
return Optional.empty();
}
@Override
public double doubleValue() {
double d = _value.doubleValue();
if (Double.isFinite(d)) {
return d;
}
return _reportDoubleCoercionRangeFail("doubleValue()");
}
@Override
public double doubleValue(double defaultValue) {
double d = _value.doubleValue();
if (Double.isFinite(d)) {
return d;
}
return defaultValue;
}
@Override
public OptionalDouble doubleValueOpt() {
double d = _value.doubleValue();
if (Double.isFinite(d)) {
return OptionalDouble.of(d);
}
return OptionalDouble.empty();
}
@Override
public double asDouble() {
double d = _value.doubleValue();
if (Double.isFinite(d)) {
return d;
}
return _reportDoubleCoercionRangeFail("asDouble()");
}
@Override
public double asDouble(double defaultValue) {
double d = _value.doubleValue();
if (Double.isFinite(d)) {
return d;
}
return defaultValue;
}
@Override
public OptionalDouble asDoubleOpt() {
double d = _value.doubleValue();
if (Double.isFinite(d)) {
return OptionalDouble.of(d);
}
return OptionalDouble.empty();
}
// Overridden versions from NumericFPNode (for minor performance gain)
@Override
public BigDecimal decimalValue() { return _value; }
@Override
public BigDecimal decimalValue(BigDecimal defaultValue) { return _value; }
@Override
public Optional<BigDecimal> decimalValueOpt() { return Optional.of(_value); }
@Override
public BigDecimal asDecimal() { return _value; }
@Override
public BigDecimal asDecimal(BigDecimal defaultValue) { return _value; }
@Override
public Optional<BigDecimal> asDecimalOpt() { return Optional.of(_value); }
/*
/**********************************************************************
/* NumericFPNode abstract method impls
/**********************************************************************
*/
@Override
public short _asShortValueUnchecked() {
return _value.shortValue();
}
@Override
public int _asIntValueUnchecked() {
return _value.intValue();
}
@Override
public long _asLongValueUnchecked() {
return _value.longValue();
}
@Override
protected BigInteger _asBigIntegerValueUnchecked() {
return _value.toBigInteger();
}
@Override
protected BigDecimal _asDecimalValueUnchecked() {
return _value;
}
@Override
public boolean hasFractionalPart() {
return (_value.signum() != 0)
&& (_value.scale() > 0)
&& (_value.stripTrailingZeros().scale() > 0);
}
@Override
public boolean inShortRange() {
return (_value.compareTo(BD_MIN_SHORT) >= 0) && (_value.compareTo(BD_MAX_SHORT) <= 0);
}
@Override
public boolean inIntRange() {
return (_value.compareTo(BD_MIN_INTEGER) >= 0) && (_value.compareTo(BD_MAX_INTEGER) <= 0);
}
@Override
public boolean inLongRange() {
return (_value.compareTo(BD_MIN_LONG) >= 0) && (_value.compareTo(BD_MAX_LONG) <= 0);
}
/*
/**********************************************************************
/* Other overrides
/**********************************************************************
*/
@Override
public final void serialize(JsonGenerator g, SerializationContext provider)
throws JacksonException
{
g.writeNumber(_value);
}
@Override
public boolean equals(Object o)
{
if (o == this) return true;
if (o == null) return false;
if (o instanceof DecimalNode otherNode) {
return otherNode._value.equals(_value);
}
return false;
}
@Override
public int hashCode() {
return _value.hashCode();
}
}
| DecimalNode |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/builtin/jodatime/mapper/XmlGregorianCalendarToLocalTime.java | {
"start": 496,
"end": 787
} | interface ____ {
XmlGregorianCalendarToLocalTime INSTANCE = Mappers.getMapper( XmlGregorianCalendarToLocalTime.class );
@Mapping( target = "localTime", source = "xMLGregorianCalendar" )
LocalTimeBean toLocalTimeBean( XmlGregorianCalendarBean in );
}
| XmlGregorianCalendarToLocalTime |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/test/rxjava/RxTest.java | {
"start": 338,
"end": 534
} | class ____ {
@Inject
BeanContext beanContext;
@Test
void testMicronaut4Inject() {
Assertions.assertNotNull(beanContext.getBean(Rx3StreamingHttpClient.class));
}
}
| RxTest |
java | apache__hadoop | hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/SampleDataJoinReducer.java | {
"start": 1220,
"end": 2139
} | class ____ extends DataJoinReducerBase {
/**
*
* @param tags
* a list of source tags
* @param values
* a value per source
* @return combined value derived from values of the sources
*/
protected TaggedMapOutput combine(Object[] tags, Object[] values) {
// eliminate rows which didnot match in one of the two tables (for INNER JOIN)
if (tags.length < 2)
return null;
String joinedStr = "";
for (int i=0; i<tags.length; i++) {
if (i > 0)
joinedStr += "\t";
// strip first column as it is the key on which we joined
String line = ((Text) (((TaggedMapOutput) values[i]).getData())).toString();
String[] tokens = line.split("\\t", 2);
joinedStr += tokens[1];
}
TaggedMapOutput retv = new SampleTaggedMapOutput(new Text(joinedStr));
retv.setTag((Text) tags[0]);
return retv;
}
}
| SampleDataJoinReducer |
java | spring-projects__spring-boot | module/spring-boot-websocket/src/test/java/org/springframework/boot/websocket/autoconfigure/servlet/WebSocketMessagingAutoConfigurationTests.java | {
"start": 15847,
"end": 16056
} | class ____ {
@SubscribeMapping("/json")
Data json() {
return new Data(5, "baz");
}
@SubscribeMapping("/string")
String string() {
return "string data";
}
}
public static | MessagingController |
java | elastic__elasticsearch | x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseFrozenSearchableSnapshotsIntegTestCase.java | {
"start": 702,
"end": 2054
} | class ____ extends BaseSearchableSnapshotsIntegTestCase {
@Override
protected boolean forceSingleDataPath() {
return true;
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings));
if (DiscoveryNode.canContainData(otherSettings)) {
builder.put(
SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(),
rarely()
? randomBoolean()
? ByteSizeValue.of(randomIntBetween(1, 10), ByteSizeUnit.KB).getStringRep()
: ByteSizeValue.of(randomIntBetween(1, 1000), ByteSizeUnit.BYTES).getStringRep()
: randomBoolean() ? ByteSizeValue.of(randomIntBetween(1, 10), ByteSizeUnit.MB).getStringRep()
: new RatioValue(randomDoubleBetween(0.0d, 0.1d, false)).toString() // only use up to 0.1% disk to be friendly.
// don't test mmap on Windows since we don't have code to unmap the shared cache file which trips assertions after tests
).put(SharedBlobCacheService.SHARED_CACHE_MMAP.getKey(), WINDOWS == false && randomBoolean());
}
return builder.build();
}
}
| BaseFrozenSearchableSnapshotsIntegTestCase |
java | netty__netty | transport/src/main/java/io/netty/channel/ChannelHandler.java | {
"start": 5422,
"end": 6896
} | class ____ extends {@link ChannelInitializer}<{@link Channel}> {
*
* private static final DataServerHandler <b>SHARED</b> = new DataServerHandler();
*
* {@code @Override}
* public void initChannel({@link Channel} channel) {
* channel.pipeline().addLast("handler", <b>SHARED</b>);
* }
* }
* </pre>
*
*
* <h4>The {@code @Sharable} annotation</h4>
* <p>
* In the example above which used an {@link AttributeKey},
* you might have noticed the {@code @Sharable} annotation.
* <p>
* If a {@link ChannelHandler} is annotated with the {@code @Sharable}
* annotation, it means you can create an instance of the handler just once and
* add it to one or more {@link ChannelPipeline}s multiple times without
* a race condition.
* <p>
* If this annotation is not specified, you have to create a new handler
* instance every time you add it to a pipeline because it has unshared state
* such as member variables.
* <p>
* This annotation is provided for documentation purpose, just like
* <a href="http://www.javaconcurrencyinpractice.com/annotations/doc/">the JCIP annotations</a>.
*
* <h3>Additional resources worth reading</h3>
* <p>
* Please refer to the {@link ChannelHandler}, and
* {@link ChannelPipeline} to find out more about inbound and outbound operations,
* what fundamental differences they have, how they flow in a pipeline, and how to handle
* the operation in your application.
*/
public | DataServerInitializer |
java | netty__netty | codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2SettingsFrame.java | {
"start": 834,
"end": 1715
} | class ____ implements Http2SettingsFrame {
private final Http2Settings settings;
public DefaultHttp2SettingsFrame(Http2Settings settings) {
this.settings = ObjectUtil.checkNotNull(settings, "settings");
}
@Override
public Http2Settings settings() {
return settings;
}
@Override
public String name() {
return "SETTINGS";
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Http2SettingsFrame)) {
return false;
}
Http2SettingsFrame other = (Http2SettingsFrame) o;
return settings.equals(other.settings());
}
@Override
public int hashCode() {
return settings.hashCode();
}
@Override
public String toString() {
return StringUtil.simpleClassName(this) + "(settings=" + settings + ')';
}
}
| DefaultHttp2SettingsFrame |
java | micronaut-projects__micronaut-core | inject-groovy/src/main/groovy/io/micronaut/ast/groovy/scan/AnnotationClassReader.java | {
"start": 17402,
"end": 17583
} | class
____ (enclosingOwner != null) {
classVisitor.visitOuterClass(enclosingOwner, enclosingName,
enclosingDesc);
}
// visits the | if |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForAttributes.java | {
"start": 3389,
"end": 3886
} | class ____ extends NodeLabelTestBase {
private static final RecordFactory RECORD_FACTORY =
RecordFactoryProvider.getRecordFactory(null);
private NodeManager nm;
private DummyNodeAttributesProvider dummyAttributesProviderRef;
@BeforeEach
public void setup() {
dummyAttributesProviderRef = new DummyNodeAttributesProvider();
}
@AfterEach
public void tearDown() {
if (null != nm) {
ServiceOperations.stop(nm);
}
}
private | TestNodeStatusUpdaterForAttributes |
java | apache__flink | flink-core-api/src/main/java/org/apache/flink/api/common/state/BroadcastState.java | {
"start": 2134,
"end": 3521
} | interface ____<K, V> extends ReadOnlyBroadcastState<K, V> {
/**
* Associates a new value with the given key.
*
* @param key The key of the mapping
* @param value The new value of the mapping
* @throws Exception Thrown if the system cannot access the state.
*/
void put(K key, V value) throws Exception;
/**
* Copies all of the mappings from the given map into the state.
*
* @param map The mappings to be stored in this state
* @throws Exception Thrown if the system cannot access the state.
*/
void putAll(Map<K, V> map) throws Exception;
/**
* Deletes the mapping of the given key.
*
* @param key The key of the mapping
* @throws Exception Thrown if the system cannot access the state.
*/
void remove(K key) throws Exception;
/**
* Iterates over all the mappings in the state.
*
* @return An iterator over all the mappings in the state
* @throws Exception Thrown if the system cannot access the state.
*/
Iterator<Map.Entry<K, V>> iterator() throws Exception;
/**
* Returns all the mappings in the state.
*
* @return An iterable view of all the key-value pairs in the state.
* @throws Exception Thrown if the system cannot access the state.
*/
Iterable<Map.Entry<K, V>> entries() throws Exception;
}
| BroadcastState |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java | {
"start": 20500,
"end": 26781
} | class ____ to consume all available log data
consumerConfigs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
// Turn off autocommit since we always want to consume the full log
consumerConfigs.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
return new KafkaConsumer<>(consumerConfigs);
}
/**
* Signals whether a topic partition should be read by this log. Invoked on {@link #start() startup} once
* for every partition found in the log's backing topic.
* <p>This method can be overridden by subclasses when only a subset of the assigned partitions
* should be read into memory. By default, all partitions are read.
* @param topicPartition A topic partition which could be read by this log.
* @return true if the partition should be read by this log, false if its contents should be ignored.
*/
protected boolean readPartition(TopicPartition topicPartition) {
return true;
}
private void poll() {
try {
ConsumerRecords<K, V> records = consumer.poll(Duration.ofMillis(Integer.MAX_VALUE));
for (ConsumerRecord<K, V> record : records)
consumedCallback.onCompletion(null, record);
} catch (WakeupException e) {
// Expected on get() or stop(). The calling code should handle this
throw e;
} catch (KafkaException e) {
log.error("Error polling: ", e);
if (reportErrorsToCallback) {
consumedCallback.onCompletion(e, null);
}
}
}
/**
* This method finds the end offsets of the Kafka log's topic partitions, optionally retrying
* if the {@code listOffsets()} method of the admin client throws a {@link RetriableException}.
*
* @param shouldRetry Boolean flag to enable retry for the admin client {@code listOffsets()} call.
* @see TopicAdmin#retryEndOffsets
*/
private void readToLogEnd(boolean shouldRetry) {
Set<TopicPartition> assignment = consumer.assignment();
Map<TopicPartition, Long> endOffsets = readEndOffsets(assignment, shouldRetry);
log.trace("Reading to end of log offsets {}", endOffsets);
while (!endOffsets.isEmpty()) {
Iterator<Map.Entry<TopicPartition, Long>> it = endOffsets.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<TopicPartition, Long> entry = it.next();
TopicPartition topicPartition = entry.getKey();
long endOffset = entry.getValue();
long lastConsumedOffset = consumer.position(topicPartition);
if (lastConsumedOffset >= endOffset) {
log.trace("Read to end offset {} for {}", endOffset, topicPartition);
it.remove();
} else {
log.trace("Behind end offset {} for {}; last-read offset is {}",
endOffset, topicPartition, lastConsumedOffset);
poll();
break;
}
}
}
}
// Visible for testing
/**
* Read to the end of the given list of topic partitions
* @param assignment the topic partitions to read to the end of
* @param shouldRetry boolean flag to enable retry for the admin client {@code listOffsets()} call.
* @throws UnsupportedVersionException if the log's consumer is using the "read_committed" isolation level (and
* therefore a separate admin client is required to read end offsets for the topic), but the broker does not support
* reading end offsets using an admin client
*/
Map<TopicPartition, Long> readEndOffsets(Set<TopicPartition> assignment, boolean shouldRetry) throws UnsupportedVersionException {
log.trace("Reading to end of offset log");
// Note that we'd prefer to not use the consumer to find the end offsets for the assigned topic partitions.
// That is because it's possible that the consumer is already blocked waiting for new records to appear, when
// the consumer is already at the end. In such cases, using 'consumer.endOffsets(...)' will block until at least
// one more record becomes available, meaning we can't even check whether we're at the end offset.
// Since all we're trying to do here is get the end offset, we should use the supplied admin client
// (if available) to obtain the end offsets for the given topic partitions.
// Deprecated constructors do not provide an admin supplier, so the admin is potentially null.
if (admin != null) {
// Use the admin client to immediately find the end offsets for the assigned topic partitions.
// Unlike using the consumer
try {
if (shouldRetry) {
return admin.retryEndOffsets(assignment,
ADMIN_CLIENT_RETRY_DURATION,
ADMIN_CLIENT_RETRY_BACKOFF_MS);
}
return admin.endOffsets(assignment);
} catch (UnsupportedVersionException e) {
// This may happen with really old brokers that don't support the auto topic creation
// field in metadata requests
if (requireAdminForOffsets) {
// Should be handled by the caller during log startup
throw e;
}
log.debug("Reading to end of log offsets with consumer since admin client is unsupported: {}", e.getMessage());
// Forget the reference to the admin so that we won't even try to use the admin the next time this method is called
admin = null;
// continue and let the consumer handle the read
}
// Other errors, like timeouts and retriable exceptions are intentionally propagated
}
// The admin may be null if older deprecated constructor is used or if the admin client is using a broker that doesn't
// support getting the end offsets (e.g., 0.10.x). In such cases, we should use the consumer, which is not ideal (see above).
return consumer.endOffsets(assignment);
}
private | wants |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/errorhandler/JtaTransactionErrorHandlerDefinition.java | {
"start": 1297,
"end": 1939
} | class ____ extends TransactionErrorHandlerDefinition {
public JtaTransactionErrorHandlerDefinition() {
}
public JtaTransactionErrorHandlerDefinition(JtaTransactionErrorHandlerDefinition source) {
super(source);
}
@Override
public JtaTransactionErrorHandlerDefinition copyDefinition() {
return new JtaTransactionErrorHandlerDefinition(this);
}
@Override
public ErrorHandlerFactory cloneBuilder() {
TransactionErrorHandlerDefinition answer = new JtaTransactionErrorHandlerDefinition();
cloneBuilder(answer);
return answer;
}
}
| JtaTransactionErrorHandlerDefinition |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/aot/AutowiredArgumentsCodeGenerator.java | {
"start": 1352,
"end": 3192
} | class ____ {
private final Class<?> target;
private final Executable executable;
public AutowiredArgumentsCodeGenerator(Class<?> target, Executable executable) {
this.target = target;
this.executable = executable;
}
public CodeBlock generateCode(Class<?>[] parameterTypes) {
return generateCode(parameterTypes, 0, "args");
}
public CodeBlock generateCode(Class<?>[] parameterTypes, int startIndex) {
return generateCode(parameterTypes, startIndex, "args");
}
public CodeBlock generateCode(Class<?>[] parameterTypes, int startIndex, String variableName) {
Assert.notNull(parameterTypes, "'parameterTypes' must not be null");
Assert.notNull(variableName, "'variableName' must not be null");
boolean ambiguous = isAmbiguous();
CodeBlock.Builder code = CodeBlock.builder();
for (int i = startIndex; i < parameterTypes.length; i++) {
code.add(i > startIndex ? ", " : "");
if (!ambiguous) {
code.add("$L.get($L)", variableName, i);
}
else {
code.add("$L.get($L, $T.class)", variableName, i, parameterTypes[i]);
}
}
return code.build();
}
private boolean isAmbiguous() {
if (this.executable instanceof Constructor<?> constructor) {
return Arrays.stream(this.target.getDeclaredConstructors())
.filter(Predicate.not(constructor::equals))
.anyMatch(this::hasSameParameterCount);
}
if (this.executable instanceof Method method) {
return Arrays.stream(ReflectionUtils.getAllDeclaredMethods(this.target))
.filter(Predicate.not(method::equals))
.filter(candidate -> candidate.getName().equals(method.getName()))
.anyMatch(this::hasSameParameterCount);
}
return true;
}
private boolean hasSameParameterCount(Executable executable) {
return this.executable.getParameterCount() == executable.getParameterCount();
}
}
| AutowiredArgumentsCodeGenerator |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesGenerator.java | {
"start": 1226,
"end": 6686
} | class ____ {
private record ChangelogsBundleWrapper(
QualifiedVersion version,
ChangelogBundle bundle,
Map<String, Map<String, List<ChangelogEntry>>> changelogsByTypeByArea,
QualifiedVersion unqualifiedVersion,
String versionWithoutSeparator,
List<ChangelogEntry.Highlight> notableHighlights,
List<ChangelogEntry.Highlight> nonNotableHighlights
) {}
/**
* These mappings translate change types into the headings as they should appear in the release notes.
*/
private static final Map<String, String> TYPE_LABELS = new HashMap<>();
static {
TYPE_LABELS.put("breaking", "Breaking changes");
TYPE_LABELS.put("breaking-java", "Breaking Java changes");
TYPE_LABELS.put("bug", "Bug fixes");
TYPE_LABELS.put("fixes", "Fixes");
TYPE_LABELS.put("deprecation", "Deprecations");
TYPE_LABELS.put("enhancement", "Enhancements");
TYPE_LABELS.put("feature", "New features");
TYPE_LABELS.put("features-enhancements", "Features and enhancements");
TYPE_LABELS.put("new-aggregation", "New aggregation");
TYPE_LABELS.put("regression", "Regressions");
TYPE_LABELS.put("upgrade", "Upgrades");
}
/**
* These are the types of changes that are considered "Features and Enhancements" in the release notes.
*/
private static final List<String> FEATURE_ENHANCEMENT_TYPES = List.of("feature", "new-aggregation", "enhancement", "upgrade");
static void update(File templateFile, File outputFile, List<ChangelogBundle> bundles) throws IOException {
final String templateString = Files.readString(templateFile.toPath());
try (FileWriter output = new FileWriter(outputFile)) {
output.write(generateFile(templateString, bundles));
}
}
@VisibleForTesting
static String generateFile(String template, List<ChangelogBundle> bundles) throws IOException {
var bundlesWrapped = new ArrayList<ChangelogsBundleWrapper>();
for (var bundle : bundles) {
var changelogs = bundle.changelogs();
final var changelogsByTypeByArea = buildChangelogBreakdown(changelogs);
final Map<Boolean, List<ChangelogEntry.Highlight>> groupedHighlights = changelogs.stream()
.map(ChangelogEntry::getHighlight)
.filter(Objects::nonNull)
.sorted(comparingInt(ChangelogEntry.Highlight::getPr))
.collect(groupingBy(ChangelogEntry.Highlight::isNotable, toList()));
final var notableHighlights = groupedHighlights.getOrDefault(true, List.of());
final var nonNotableHighlights = groupedHighlights.getOrDefault(false, List.of());
final var version = QualifiedVersion.of(bundle.version());
final var versionWithoutSeparator = version.withoutQualifier().toString().replaceAll("\\.", "");
final var wrapped = new ChangelogsBundleWrapper(
version,
bundle,
changelogsByTypeByArea,
version.withoutQualifier(),
versionWithoutSeparator,
notableHighlights,
nonNotableHighlights
);
bundlesWrapped.add(wrapped);
}
final Map<String, Object> bindings = new HashMap<>();
bindings.put("TYPE_LABELS", TYPE_LABELS);
bindings.put("changelogBundles", bundlesWrapped);
return TemplateUtils.render(template, bindings);
}
/**
* The new markdown release notes are grouping several of the old change types together.
* This method maps the change type that developers use in the changelogs to the new type that the release notes cares about.
*/
private static String getTypeFromEntry(ChangelogEntry entry) {
if (entry.getBreaking() != null) {
return "breaking";
}
if (FEATURE_ENHANCEMENT_TYPES.contains(entry.getType())) {
return "features-enhancements";
}
if (entry.getType().equals("bug")) {
return "fixes";
}
return entry.getType();
}
private static Map<String, Map<String, List<ChangelogEntry>>> buildChangelogBreakdown(Collection<ChangelogEntry> changelogs) {
Map<String, Map<String, List<ChangelogEntry>>> changelogsByTypeByArea = changelogs.stream()
.collect(
groupingBy(
// Entries with breaking info are always put in the breaking section
entry -> getTypeFromEntry(entry),
TreeMap::new,
// Group changelogs for each type by their team area
groupingBy(
// `security` and `known-issue` areas don't need to supply an area
entry -> entry.getType().equals("known-issue") || entry.getType().equals("security") ? "_all_" : entry.getArea(),
TreeMap::new,
toList()
)
)
);
// Sort per-area changelogs by their summary text. Assumes that the underlying list is sortable
changelogsByTypeByArea.forEach(
(_type, byTeam) -> byTeam.forEach((_team, changelogsForTeam) -> changelogsForTeam.sort(comparing(ChangelogEntry::getSummary)))
);
return changelogsByTypeByArea;
}
}
| ReleaseNotesGenerator |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/classrealm/DefaultClassRealmManager.java | {
"start": 13302,
"end": 13774
} | class ____ {}", classRealm.getId());
for (String imp : parentImports) {
logger.debug(" Imported: {} < {}", imp, getId(classRealm.getParentClassLoader()));
classRealm.importFromParent(imp);
}
}
}
private static Object getId(ClassLoader classLoader) {
if (classLoader instanceof ClassRealm classRealm) {
return classRealm.getId();
}
return classLoader;
}
}
| realm |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/MapAssertBaseTest.java | {
"start": 1030,
"end": 1828
} | class ____ extends BaseTestTemplate<MapAssert<Object, Object>, Map<Object, Object>> {
protected Maps maps;
@Override
protected MapAssert<Object, Object> create_assertions() {
return new MapAssert<>(emptyMap());
}
@Override
protected void inject_internal_objects() {
super.inject_internal_objects();
maps = mock(Maps.class);
assertions.maps = maps;
}
protected static <K, V> Map.Entry<K, V> javaMapEntry(K key, V value) {
return new SimpleImmutableEntry<>(key, value);
}
protected static <K, V> Map<K, V> map(K key, V value) {
return singletonMap(key, value);
}
protected static <K, V> Map<K, V> map(K k1, V v1, K k2, V v2) {
Map<K, V> map = new LinkedHashMap<>();
map.put(k1, v1);
map.put(k2, v2);
return map;
}
}
| MapAssertBaseTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java | {
"start": 10400,
"end": 10480
} | interface ____ extends SequenceFile.Writer.Option { }
private static | Option |
java | quarkusio__quarkus | extensions/smallrye-graphql/deployment/src/test/java/io/quarkus/smallrye/graphql/deployment/ConcurrentAuthTest.java | {
"start": 1100,
"end": 3984
} | class ____ extends AbstractGraphQLTest {
static Map<String, String> PROPERTIES = new HashMap<>();
static {
PROPERTIES.put("quarkus.smallrye-graphql.error-extension-fields", "classification,code");
PROPERTIES.put("quarkus.smallrye-graphql.show-runtime-exception-message", "java.lang.SecurityException");
PROPERTIES.put("quarkus.http.auth.basic", "true");
PROPERTIES.put("quarkus.security.users.embedded.enabled", "true");
PROPERTIES.put("quarkus.security.users.embedded.plain-text", "true");
PROPERTIES.put("quarkus.security.users.embedded.users.scott", "jb0ss");
}
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(FilmResource.class, Film.class, GalaxyService.class)
.addAsResource(new StringAsset(getPropertyAsString(PROPERTIES)), "application.properties")
.addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml"));
private int iterations = 5000;
@Test
public void concurrentAllFilmsOnly() throws InterruptedException, ExecutionException {
ExecutorService executor = Executors.newFixedThreadPool(50);
try {
var futures = new ArrayList<CompletableFuture<Boolean>>(iterations);
for (int i = 0; i < iterations; i++) {
futures.add(CompletableFuture.supplyAsync(this::allFilmsRequestWithAuth, executor)
.thenApply(r -> !r.getBody().asString().contains("unauthorized")));
}
Optional<Boolean> success = getTestResult(futures);
Assertions.assertTrue(success.orElse(false), "Unauthorized response codes were found");
} finally {
executor.shutdown();
}
}
private static Optional<Boolean> getTestResult(ArrayList<CompletableFuture<Boolean>> futures)
throws InterruptedException, ExecutionException {
return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
.thenApply(v -> futures.stream()
.map(CompletableFuture::join)
.reduce(Boolean::logicalAnd))
.get();
}
private Response allFilmsRequestWithAuth() {
String requestBody = "{\"query\":" +
"\"" +
"{" +
" allFilmsSecured {" +
" title" +
" director" +
" releaseDate" +
" episodeID" +
"}" +
"}" +
"\"" +
"}";
return given()
.body(requestBody)
.auth()
.preemptive()
.basic("scott", "jb0ss")
.post("/graphql/");
}
@GraphQLApi
public static | ConcurrentAuthTest |
java | apache__camel | components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyContentBasedRouteTest.java | {
"start": 1035,
"end": 2159
} | class ____ extends BaseJettyTest {
private final String serverUri = "http://localhost:" + getPort() + "/myservice";
@Test
public void testSendOne() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:one");
mock.expectedHeaderReceived("one", "true");
template.requestBody(serverUri + "?one=true", null, Object.class);
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testSendOther() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:other");
mock.expectedHeaderReceived("two", "true");
template.requestBody(serverUri + "?two=true", null, Object.class);
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// START SNIPPET: e1
from("jetty:" + serverUri).choice().when().simple("${header.one}").to("mock:one").otherwise().to("mock:other");
// END SNIPPET: e1
}
};
}
}
| JettyContentBasedRouteTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileAlreadyExistsException.java | {
"start": 1165,
"end": 1407
} | class ____
extends IOException {
private static final long serialVersionUID = 1L;
public FileAlreadyExistsException() {
super();
}
public FileAlreadyExistsException(String msg) {
super(msg);
}
}
| FileAlreadyExistsException |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java | {
"start": 16094,
"end": 17265
} | class ____ subclass and implement the following
* 3 abstract methods.
* @return Function.
*/
protected abstract Function<URI, T> initAndGetTargetFs();
protected abstract T getTargetFileSystem(INodeDir<T> dir)
throws URISyntaxException, IOException;
protected abstract T getTargetFileSystem(String settings, URI[] mergeFsURIs)
throws UnsupportedFileSystemException, URISyntaxException, IOException;
private INodeDir<T> getRootDir() {
Preconditions.checkState(root.isInternalDir());
return (INodeDir<T>)root;
}
private INodeLink<T> getRootLink() {
Preconditions.checkState(!root.isInternalDir());
return (INodeLink<T>)root;
}
private boolean hasFallbackLink() {
return rootFallbackLink != null;
}
/**
* @return true if the root represented as internalDir. In LinkMergeSlash,
* there will be root to root mapping. So, root does not represent as
* internalDir.
*/
public boolean isRootInternalDir() {
return root.isInternalDir();
}
public INodeLink<T> getRootFallbackLink() {
Preconditions.checkState(root.isInternalDir());
return rootFallbackLink;
}
/**
* An internal | must |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/abstractclass/BaseMapperInterface.java | {
"start": 235,
"end": 334
} | interface ____ {
Target sourceToTargetFromBaseMapperInterface(Source source);
}
| BaseMapperInterface |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/codec/tsdb/TSDBSyntheticIdCodec.java | {
"start": 3137,
"end": 3894
} | class ____ extends FilterCodec {
private final RewriteFieldInfosFormat fieldInfosFormat;
private final EnsureNoPostingsFormat postingsFormat;
public TSDBSyntheticIdCodec(String name, Codec delegate) {
super(name, delegate);
this.fieldInfosFormat = new RewriteFieldInfosFormat(delegate.fieldInfosFormat());
this.postingsFormat = new EnsureNoPostingsFormat(delegate.postingsFormat());
}
@Override
public final FieldInfosFormat fieldInfosFormat() {
return fieldInfosFormat;
}
@Override
public PostingsFormat postingsFormat() {
return postingsFormat;
}
/**
* {@link FieldInfosFormat} that overwrites the {@link FieldInfos}.
*/
private static | TSDBSyntheticIdCodec |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java | {
"start": 1993,
"end": 8043
} | class ____ {
String hostname;
List<Split> splits = new ArrayList<Split>();
Host(String hostname) {
this.hostname = hostname;
}
public String toString() {
StringBuilder result = new StringBuilder();
result.append(splits.size());
result.append(" ");
result.append(hostname);
return result.toString();
}
}
List<String> readFile(String filename) throws IOException {
List<String> result = new ArrayList<String>(10000);
try (BufferedReader in = new BufferedReader(
new InputStreamReader(new FileInputStream(filename), StandardCharsets.UTF_8))) {
String line = in.readLine();
while (line != null) {
result.add(line);
line = in.readLine();
}
}
return result;
}
public TeraScheduler(String splitFilename,
String nodeFilename) throws IOException {
slotsPerHost = 4;
// get the hosts
Map<String, Host> hostIds = new HashMap<String,Host>();
for(String hostName: readFile(nodeFilename)) {
Host host = new Host(hostName);
hosts.add(host);
hostIds.put(hostName, host);
}
// read the blocks
List<String> splitLines = readFile(splitFilename);
splits = new Split[splitLines.size()];
remainingSplits = 0;
for(String line: splitLines) {
StringTokenizer itr = new StringTokenizer(line);
Split newSplit = new Split(itr.nextToken());
splits[remainingSplits++] = newSplit;
while (itr.hasMoreTokens()) {
Host host = hostIds.get(itr.nextToken());
newSplit.locations.add(host);
host.splits.add(newSplit);
}
}
}
public TeraScheduler(FileSplit[] realSplits,
Configuration conf) throws IOException {
this.realSplits = realSplits;
this.slotsPerHost = conf.getInt(TTConfig.TT_MAP_SLOTS, 4);
Map<String, Host> hostTable = new HashMap<String, Host>();
splits = new Split[realSplits.length];
for(FileSplit realSplit: realSplits) {
Split split = new Split(realSplit.getPath().toString());
splits[remainingSplits++] = split;
for(String hostname: realSplit.getLocations()) {
Host host = hostTable.get(hostname);
if (host == null) {
host = new Host(hostname);
hostTable.put(hostname, host);
hosts.add(host);
}
host.splits.add(split);
split.locations.add(host);
}
}
}
Host pickBestHost() {
Host result = null;
int splits = Integer.MAX_VALUE;
for(Host host: hosts) {
if (host.splits.size() < splits) {
result = host;
splits = host.splits.size();
}
}
if (result != null) {
hosts.remove(result);
LOG.debug("picking " + result);
}
return result;
}
void pickBestSplits(Host host) {
int tasksToPick = Math.min(slotsPerHost,
(int) Math.ceil((double) remainingSplits /
hosts.size()));
Split[] best = new Split[tasksToPick];
for(Split cur: host.splits) {
LOG.debug(" examine: " + cur.filename + " " + cur.locations.size());
int i = 0;
while (i < tasksToPick && best[i] != null &&
best[i].locations.size() <= cur.locations.size()) {
i += 1;
}
if (i < tasksToPick) {
for(int j = tasksToPick - 1; j > i; --j) {
best[j] = best[j-1];
}
best[i] = cur;
}
}
// for the chosen blocks, remove them from the other locations
for(int i=0; i < tasksToPick; ++i) {
if (best[i] != null) {
LOG.debug(" best: " + best[i].filename);
for (Host other: best[i].locations) {
other.splits.remove(best[i]);
}
best[i].locations.clear();
best[i].locations.add(host);
best[i].isAssigned = true;
remainingSplits -= 1;
}
}
// for the non-chosen blocks, remove this host
for(Split cur: host.splits) {
if (!cur.isAssigned) {
cur.locations.remove(host);
}
}
}
void solve() throws IOException {
Host host = pickBestHost();
while (host != null) {
pickBestSplits(host);
host = pickBestHost();
}
}
/**
* Solve the schedule and modify the FileSplit array to reflect the new
* schedule. It will move placed splits to front and unplacable splits
* to the end.
* @return a new list of FileSplits that are modified to have the
* best host as the only host.
* @throws IOException
*/
public List<InputSplit> getNewFileSplits() throws IOException {
solve();
FileSplit[] result = new FileSplit[realSplits.length];
int left = 0;
int right = realSplits.length - 1;
for(int i=0; i < splits.length; ++i) {
if (splits[i].isAssigned) {
// copy the split and fix up the locations
String[] newLocations = {splits[i].locations.get(0).hostname};
realSplits[i] = new FileSplit(realSplits[i].getPath(),
realSplits[i].getStart(), realSplits[i].getLength(), newLocations);
result[left++] = realSplits[i];
} else {
result[right--] = realSplits[i];
}
}
List<InputSplit> ret = new ArrayList<InputSplit>();
for (FileSplit fs : result) {
ret.add(fs);
}
return ret;
}
public static void main(String[] args) throws IOException {
TeraScheduler problem = new TeraScheduler("block-loc.txt", "nodes");
for(Host host: problem.hosts) {
System.out.println(host);
}
LOG.info("starting solve");
problem.solve();
List<Split> leftOvers = new ArrayList<Split>();
for(int i=0; i < problem.splits.length; ++i) {
if (problem.splits[i].isAssigned) {
System.out.println("sched: " + problem.splits[i]);
} else {
leftOvers.add(problem.splits[i]);
}
}
for(Split cur: leftOvers) {
System.out.println("left: " + cur);
}
System.out.println("left over: " + leftOvers.size());
LOG.info("done");
}
}
| Host |
java | quarkusio__quarkus | extensions/mailer/runtime/src/test/java/io/quarkus/mailer/runtime/BlockingMailerImplTest.java | {
"start": 549,
"end": 4492
} | class ____ {
private Vertx vertx;
@BeforeEach
void setup() {
vertx = Vertx.vertx();
}
@AfterEach
void tearDown() {
if (vertx != null) {
vertx.close().await().indefinitely();
}
}
@Test
void testTimeoutThrowsException() {
ReactiveMailer slowMailer = new ReactiveMailer() {
@Override
public Uni<Void> send(Mail... mails) {
return Uni.createFrom().emitter(emitter -> {
// Never complete - simulates hanging connection
});
}
};
Mailer blockingMailer = new BlockingMailerImpl(slowMailer, Duration.ofSeconds(1));
Mail mail = Mail.withText("test@example.com", "Subject", "Body");
assertThatThrownBy(() -> blockingMailer.send(mail))
.isInstanceOf(io.smallrye.mutiny.TimeoutException.class);
}
@Test
void testTimeoutZeroWaitsIndefinitely() throws InterruptedException {
ReactiveMailer delayedMailer = new ReactiveMailer() {
@Override
public Uni<Void> send(Mail... mails) {
return Uni.createFrom().voidItem()
.onItem().delayIt().by(Duration.ofMillis(500));
}
};
Mailer blockingMailer = new BlockingMailerImpl(delayedMailer, Duration.ZERO);
Mail mail = Mail.withText("test@example.com", "Subject", "Body");
// Should complete successfully even though it takes 500ms
// because zero timeout means wait indefinitely
long startTime = System.currentTimeMillis();
blockingMailer.send(mail);
long duration = System.currentTimeMillis() - startTime;
assertThat(duration).isGreaterThanOrEqualTo(500);
}
@Test
void testTimeoutNullWaitsIndefinitely() {
ReactiveMailer delayedMailer = new ReactiveMailer() {
@Override
public Uni<Void> send(Mail... mails) {
return Uni.createFrom().voidItem()
.onItem().delayIt().by(Duration.ofMillis(500));
}
};
Mailer blockingMailer = new BlockingMailerImpl(delayedMailer, null);
Mail mail = Mail.withText("test@example.com", "Subject", "Body");
// Should complete successfully even though it takes 500ms
// because null timeout means wait indefinitely
long startTime = System.currentTimeMillis();
blockingMailer.send(mail);
long duration = System.currentTimeMillis() - startTime;
assertThat(duration).isGreaterThanOrEqualTo(500);
}
@Test
void testSuccessfulSendWithinTimeout() {
ReactiveMailer fastMailer = new ReactiveMailer() {
@Override
public Uni<Void> send(Mail... mails) {
return Uni.createFrom().voidItem();
}
};
Mailer blockingMailer = new BlockingMailerImpl(fastMailer, Duration.ofSeconds(10));
Mail mail = Mail.withText("test@example.com", "Subject", "Body");
long startTime = System.currentTimeMillis();
assertThatCode(() -> blockingMailer.send(mail)).doesNotThrowAnyException();
long duration = System.currentTimeMillis() - startTime;
assertThat(duration).isLessThan(1000);
}
@Test
void testExceptionPropagation() {
ReactiveMailer failingMailer = new ReactiveMailer() {
@Override
public Uni<Void> send(Mail... mails) {
return Uni.createFrom().failure(new RuntimeException("SMTP error"));
}
};
Mailer blockingMailer = new BlockingMailerImpl(failingMailer, Duration.ofSeconds(10));
Mail mail = Mail.withText("test@example.com", "Subject", "Body");
assertThatThrownBy(() -> blockingMailer.send(mail))
.isInstanceOf(RuntimeException.class)
.hasMessage("SMTP error");
}
}
| BlockingMailerImplTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.