language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilderTests.java | {
"start": 11813,
"end": 12009
} | class
____(
builder.stringDistance().toLucene().getClass(),
termSuggesterCtx.getDirectSpellCheckerSettings().stringDistance().getClass()
);
}
}
| assertEquals |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java | {
"start": 55610,
"end": 57309
} | interface ____ extends Attrs, PCData, _Child {
/** variable name for the text
* @param cdata the content of the element.
* @return the current element builder
*/
TEXTAREA $name(String cdata);
/** visible rows
* @param rows number of rows.
* @return the current element builder
*/
TEXTAREA $rows(int rows);
/** visible columns
* @param cols number of cols.
* @return the current element builder
*/
TEXTAREA $cols(int cols);
/** unavailable in this context
* @return the current element builder
*/
TEXTAREA $disabled();
/** text is readonly
* @return the current element builder
*/
TEXTAREA $readonly();
/** position in tabbing order
* @param index the index
* @return the current element builder
*/
TEXTAREA $tabindex(int index);
/** accessibility key character
* @param cdata the content of the element.
* @return the current element builder
*/
TEXTAREA $accesskey(String cdata);
/** the element got the focus
* @param script to invoke.
* @return the current element builder
*/
TEXTAREA $onfocus(String script);
/** the element lost the focus
* @param script to invoke.
* @return the current element builder
*/
TEXTAREA $onblur(String script);
/** some text was selected
* @param script to invoke.
* @return the current element builder
*/
TEXTAREA $onselect(String script);
/** the element value was changed
* @param script to invoke.
* @return the current element builder
*/
TEXTAREA $onchange(String script);
}
/**
*
*/
public | TEXTAREA |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_yangzhou.java | {
"start": 40536,
"end": 41202
} | enum ____ {
ASC("asc"),
DESC("desc");
private String value;
SortType(String value) {
this.value = value;
}
public String getValue() {
return value;
}
public static SortType getEnumByName(String name) {
SortType[] sortTypes = SortType.values();
for (SortType sortType : sortTypes) {
if (sortType.name().equalsIgnoreCase(name)) {
return sortType;
}
}
throw new RuntimeException("SortType of " + name + " | SortType |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/streaming/api/datastream/StatefulDataStreamV2ITCase.java | {
"start": 5987,
"end": 6300
} | class ____ implements ReduceFunction<Long> {
@Override
public Long reduce(Long value1, Long value2) throws Exception {
return value1 + value2;
}
}
/** {@link OneInputStreamProcessFunction} that sums records and outputs the sum. */
private static | MockReduceSumFunction |
java | playframework__playframework | core/play/src/test/java/play/mvc/ResultsTest.java | {
"start": 937,
"end": 17512
} | class ____ {
private static Path file;
private static final boolean INLINE_FILE = true;
private static final boolean ATTACHMENT_FILE = false;
@BeforeClass
public static void createFile() throws Exception {
file = Paths.get("test.tmp");
Files.createFile(file);
Files.write(file, "Some content for the file".getBytes(), StandardOpenOption.APPEND);
}
@AfterClass
public static void deleteFile() throws IOException {
Files.deleteIfExists(file);
}
@Test
public void shouldCopyFlashWhenCallingResultAs() {
Map<String, String> flash = new HashMap<>();
flash.put("flash.message", "flash message value");
Result result = Results.redirect("/somewhere").withFlash(flash);
Result as = result.as(Http.MimeTypes.HTML);
assertNotNull(as.flash());
assertTrue(as.flash().get("flash.message").isPresent());
assertEquals("flash message value", as.flash().get("flash.message").get());
}
@Test
public void shouldCopySessionWhenCallingResultAs() {
Map<String, String> session = new HashMap<>();
session.put("session.message", "session message value");
Result result = Results.ok("Result test body").withSession(session);
Result as = result.as(Http.MimeTypes.HTML);
assertNotNull(as.session());
assertTrue(as.session().get("session.message").isPresent());
assertEquals("session message value", as.session().get("session.message").get());
}
@Test
public void shouldCopyHeadersWhenCallingResultAs() {
Result result = Results.ok("Result test body").withHeader("X-Header", "header value");
Result as = result.as(Http.MimeTypes.HTML);
assertEquals("header value", as.header("X-Header").get());
}
@Test
public void shouldCopyCookiesWhenCallingResultAs() {
Result result =
Results.ok("Result test body")
.withCookies(Http.Cookie.builder("cookie-name", "cookie value").build())
.as(Http.MimeTypes.HTML);
assertEquals("cookie value", result.cookie("cookie-name").get().value());
}
// -- Path tests
@Test(expected = NullPointerException.class)
public void shouldThrowNullPointerExceptionIfPathIsNull() {
Results.ok().sendPath(null);
}
@Test
public void sendPathWithOKStatus() {
Result result = Results.ok().sendPath(file);
assertEquals(Http.Status.OK, result.status());
assertEquals(
"inline; filename=\"test.tmp\"", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendPathWithUnauthorizedStatus() {
Result result = Results.unauthorized().sendPath(file);
assertEquals(Http.Status.UNAUTHORIZED, result.status());
assertEquals(
"inline; filename=\"test.tmp\"", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendPathAsAttachmentWithUnauthorizedStatus() {
Result result = Results.unauthorized().sendPath(file, ATTACHMENT_FILE);
assertEquals(Http.Status.UNAUTHORIZED, result.status());
assertEquals(
"attachment; filename=\"test.tmp\"", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendPathAsAttachmentWithOkStatus() {
Result result = Results.ok().sendPath(file, ATTACHMENT_FILE);
assertEquals(Http.Status.OK, result.status());
assertEquals(
"attachment; filename=\"test.tmp\"", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendPathWithFileName() {
Result result = Results.unauthorized().sendPath(file, Optional.of("foo.bar"));
assertEquals(Http.Status.UNAUTHORIZED, result.status());
assertEquals(
"inline; filename=\"foo.bar\"", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendPathInlineWithFileName() {
Result result = Results.unauthorized().sendPath(file, INLINE_FILE, Optional.of("foo.bar"));
assertEquals(Http.Status.UNAUTHORIZED, result.status());
assertEquals(
"inline; filename=\"foo.bar\"", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendPathInlineWithoutFileName() {
Result result = Results.unauthorized().sendPath(file, Optional.empty());
assertEquals(Http.Status.UNAUTHORIZED, result.status());
assertEquals(Optional.empty(), result.header(HeaderNames.CONTENT_DISPOSITION));
}
@Test
public void sendPathAsAttachmentWithoutFileName() {
Result result = Results.unauthorized().sendPath(file, ATTACHMENT_FILE, Optional.empty());
assertEquals(Http.Status.UNAUTHORIZED, result.status());
assertEquals("attachment", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendPathWithFileNameHasSpecialChars() {
Result result = Results.ok().sendPath(file, INLINE_FILE, Optional.of("测 试.tmp"));
assertEquals(Http.Status.OK, result.status());
assertEquals(
"inline; filename=\"? ?.tmp\"; filename*=utf-8''%e6%b5%8b%20%e8%af%95.tmp",
result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
// -- File tests
@Test(expected = NullPointerException.class)
public void shouldThrowNullPointerExceptionIfFileIsNull() {
Results.ok().sendFile(null);
}
@Test
public void sendFileWithOKStatus() {
Result result = Results.ok().sendFile(file.toFile());
assertEquals(Http.Status.OK, result.status());
assertEquals(
"inline; filename=\"test.tmp\"", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendFileWithUnauthorizedStatus() {
Result result = Results.unauthorized().sendFile(file.toFile());
assertEquals(Http.Status.UNAUTHORIZED, result.status());
assertEquals(
"inline; filename=\"test.tmp\"", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendFileAsAttachmentWithUnauthorizedStatus() {
Result result = Results.unauthorized().sendFile(file.toFile(), ATTACHMENT_FILE);
assertEquals(Http.Status.UNAUTHORIZED, result.status());
assertEquals(
"attachment; filename=\"test.tmp\"", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendFileAsAttachmentWithOkStatus() {
Result result = Results.ok().sendFile(file.toFile(), ATTACHMENT_FILE);
assertEquals(Http.Status.OK, result.status());
assertEquals(
"attachment; filename=\"test.tmp\"", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendFileWithFileName() {
Result result = Results.unauthorized().sendFile(file.toFile(), Optional.of("foo.bar"));
assertEquals(Http.Status.UNAUTHORIZED, result.status());
assertEquals(
"inline; filename=\"foo.bar\"", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendFileInlineWithFileName() {
Result result = Results.ok().sendFile(file.toFile(), INLINE_FILE, Optional.of("foo.bar"));
assertEquals(Http.Status.OK, result.status());
assertEquals(
"inline; filename=\"foo.bar\"", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendFileInlineWithoutFileName() {
Result result = Results.ok().sendFile(file.toFile(), Optional.empty());
assertEquals(Http.Status.OK, result.status());
assertEquals(Optional.empty(), result.header(HeaderNames.CONTENT_DISPOSITION));
}
@Test
public void sendFileAsAttachmentWithoutFileName() {
Result result = Results.ok().sendFile(file.toFile(), ATTACHMENT_FILE, Optional.empty());
assertEquals(Http.Status.OK, result.status());
assertEquals("attachment", result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendFileWithFileNameHasSpecialChars() {
Result result = Results.ok().sendFile(file.toFile(), INLINE_FILE, Optional.of("测 试.tmp"));
assertEquals(Http.Status.OK, result.status());
assertEquals(
"inline; filename=\"? ?.tmp\"; filename*=utf-8''%e6%b5%8b%20%e8%af%95.tmp",
result.header(HeaderNames.CONTENT_DISPOSITION).get());
}
@Test
public void sendFileHonoringOnClose() throws TimeoutException, InterruptedException {
ActorSystem actorSystem = ActorSystem.create("TestSystem");
Materializer mat = Materializer.matFromSystem(actorSystem);
try {
AtomicBoolean fileSent = new AtomicBoolean(false);
Result result = Results.ok().sendFile(file.toFile(), () -> fileSent.set(true), null);
// Actually we need to wait until the Stream completes
Await.ready(
FutureConverters.asScala(result.body().dataStream().runWith(Sink.ignore(), mat)),
Duration.create("60s"));
// and then we need to wait until the onClose completes
Thread.sleep(500);
assertTrue(fileSent.get());
assertEquals(Http.Status.OK, result.status());
} finally {
Await.ready(actorSystem.terminate(), Duration.create("60s"));
}
}
@Test
public void sendPathHonoringOnClose() throws TimeoutException, InterruptedException {
ActorSystem actorSystem = ActorSystem.create("TestSystem");
Materializer mat = Materializer.matFromSystem(actorSystem);
try {
AtomicBoolean fileSent = new AtomicBoolean(false);
Result result = Results.ok().sendPath(file, () -> fileSent.set(true), null);
// Actually we need to wait until the Stream completes
Await.ready(
FutureConverters.asScala(result.body().dataStream().runWith(Sink.ignore(), mat)),
Duration.create("60s"));
// and then we need to wait until the onClose completes
Thread.sleep(500);
assertTrue(fileSent.get());
assertEquals(Http.Status.OK, result.status());
} finally {
Await.ready(actorSystem.terminate(), Duration.create("60s"));
}
}
@Test
public void sendResourceHonoringOnClose() throws TimeoutException, InterruptedException {
ActorSystem actorSystem = ActorSystem.create("TestSystem");
Materializer mat = Materializer.matFromSystem(actorSystem);
try {
AtomicBoolean fileSent = new AtomicBoolean(false);
Result result =
Results.ok().sendResource("multipart-form-data-file.txt", () -> fileSent.set(true), null);
// Actually we need to wait until the Stream completes
Await.ready(
FutureConverters.asScala(result.body().dataStream().runWith(Sink.ignore(), mat)),
Duration.create("60s"));
// and then we need to wait until the onClose completes
Thread.sleep(500);
assertTrue(fileSent.get());
assertEquals(Http.Status.OK, result.status());
} finally {
Await.ready(actorSystem.terminate(), Duration.create("60s"));
}
}
@Test
public void sendInputStreamHonoringOnClose() throws TimeoutException, InterruptedException {
ActorSystem actorSystem = ActorSystem.create("TestSystem");
Materializer mat = Materializer.matFromSystem(actorSystem);
try {
AtomicBoolean fileSent = new AtomicBoolean(false);
Result result =
Results.ok()
.sendInputStream(
new ByteArrayInputStream("test data".getBytes()),
9,
() -> fileSent.set(true),
null);
// Actually we need to wait until the Stream completes
Await.ready(
FutureConverters.asScala(result.body().dataStream().runWith(Sink.ignore(), mat)),
Duration.create("60s"));
// and then we need to wait until the onClose completes
Thread.sleep(500);
assertTrue(fileSent.get());
assertEquals(Http.Status.OK, result.status());
} finally {
Await.ready(actorSystem.terminate(), Duration.create("60s"));
}
}
@Test
public void sendInputStreamChunkedHonoringOnClose()
throws TimeoutException, InterruptedException {
ActorSystem actorSystem = ActorSystem.create("TestSystem");
Materializer mat = Materializer.matFromSystem(actorSystem);
try {
AtomicBoolean fileSent = new AtomicBoolean(false);
Result result =
Results.ok()
.sendInputStream(
new ByteArrayInputStream("test data".getBytes()), () -> fileSent.set(true), null);
// Actually we need to wait until the Stream completes
Await.ready(
FutureConverters.asScala(result.body().dataStream().runWith(Sink.ignore(), mat)),
Duration.create("60s"));
// and then we need to wait until the onClose completes
Thread.sleep(500);
assertTrue(fileSent.get());
assertEquals(Http.Status.OK, result.status());
} finally {
Await.ready(actorSystem.terminate(), Duration.create("60s"));
}
}
@Test
public void getOptionalCookie() {
Result result =
Results.ok()
.withCookies(
new Http.Cookie("foo", "1", 1000, "/", "example.com", false, true, null, false));
assertTrue(result.cookie("foo").isPresent());
assertEquals("foo", result.cookie("foo").get().name());
assertFalse(result.cookie("bar").isPresent());
}
@Test
public void redirectShouldReturnTheSameUrlIfTheQueryStringParamsMapIsEmpty() {
Map<String, List<String>> queryStringParameters = new HashMap<>();
String url = "/somewhere";
Result result = Results.redirect(url, queryStringParameters);
assertTrue(result.redirectLocation().isPresent());
assertEquals(url, result.redirectLocation().get());
}
@Test
public void redirectAppendGivenQueryStringParamsToTheUrlIfUrlContainsQuestionMark() {
Map<String, List<String>> queryStringParameters = new HashMap<>();
queryStringParameters.put("param1", Arrays.asList("value1"));
String url = "/somewhere?param2=value2";
String expectedRedirectUrl = "/somewhere?param2=value2¶m1=value1";
Result result = Results.redirect(url, queryStringParameters);
assertTrue(result.redirectLocation().isPresent());
assertEquals(expectedRedirectUrl, result.redirectLocation().get());
}
@Test
public void redirectShouldAddQueryStringParamsToTheUrl() {
Map<String, List<String>> queryStringParameters = new HashMap<>();
queryStringParameters.put("param1", Arrays.asList("value1"));
queryStringParameters.put("param2", Arrays.asList("value2"));
String url = "/somewhere";
String expectedParam1 = "param1=value1";
String expectedParam2 = "param2=value2";
Result result = Results.redirect(url, queryStringParameters);
assertTrue(result.redirectLocation().isPresent());
assertTrue(result.redirectLocation().get().contains(expectedParam1));
assertTrue(result.redirectLocation().get().contains(expectedParam2));
}
@Test
public void canAddAttributes() {
TypedKey<String> x = TypedKey.create("x");
TypedMap attrs = TypedMap.create(new TypedEntry<>(x, "y"));
Result result = Results.ok().withAttrs(attrs);
assertTrue(result.attrs().containsKey(x));
assertEquals("y", result.attrs().get(x));
}
@Test
public void keepAttributesWhenModifyingHeader() {
TypedKey<String> x = TypedKey.create("x");
TypedMap attrs = TypedMap.create(new TypedEntry<>(x, "y"));
Result a = Results.ok().withAttrs(attrs).withHeader("foo", "bar");
assertTrue(a.attrs().containsKey(x));
assertEquals("y", a.attrs().get(x));
Result b = Results.ok().withAttrs(attrs).withHeaders("foo", "bar");
assertTrue(b.attrs().containsKey(x));
assertEquals("y", b.attrs().get(x));
Result c = Results.ok().withAttrs(attrs).withoutHeader("foo");
assertTrue(c.attrs().containsKey(x));
assertEquals("y", c.attrs().get(x));
}
@Test
public void keepAttributesWhenModifyingFlash() {
TypedKey<String> x = TypedKey.create("x");
TypedMap attrs = TypedMap.create(new TypedEntry<>(x, "y"));
Result result =
Results.redirect("/").withAttrs(attrs).withFlash(new Http.Flash(Map.of("foo", "bar")));
assertTrue(result.attrs().containsKey(x));
assertEquals("y", result.attrs().get(x));
}
@Test
public void keepAttributesWhenModifyingSession() {
TypedKey<String> x = TypedKey.create("x");
TypedMap attrs = TypedMap.create(new TypedEntry<>(x, "y"));
Result result =
Results.ok().withAttrs(attrs).withSession(new Http.Session(Map.of("foo", "bar")));
assertTrue(result.attrs().containsKey(x));
assertEquals("y", result.attrs().get(x));
}
@Test
public void keepAttributesWhenModifyingContentType() {
TypedKey<String> x = TypedKey.create("x");
TypedMap attrs = TypedMap.create(new TypedEntry<>(x, "y"));
Result result = Results.ok().withAttrs(attrs).as(Http.MimeTypes.TEXT);
assertTrue(result.attrs().containsKey(x));
assertEquals("y", result.attrs().get(x));
}
}
| ResultsTest |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AtomComponentBuilderFactory.java | {
"start": 1753,
"end": 4100
} | interface ____ extends ComponentBuilder<AtomComponent> {
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AtomComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default AtomComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
}
| AtomComponentBuilder |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/observers/FutureSingleObserverTest.java | {
"start": 1191,
"end": 5595
} | class ____ extends RxJavaTest {
@Test
public void cancel() {
final Future<?> f = Single.never().toFuture();
assertFalse(f.isCancelled());
assertFalse(f.isDone());
f.cancel(true);
assertTrue(f.isCancelled());
assertTrue(f.isDone());
try {
f.get();
fail("Should have thrown!");
} catch (CancellationException ex) {
// expected
} catch (InterruptedException ex) {
throw new AssertionError(ex);
} catch (ExecutionException ex) {
throw new AssertionError(ex);
}
try {
f.get(5, TimeUnit.SECONDS);
fail("Should have thrown!");
} catch (CancellationException ex) {
// expected
} catch (InterruptedException ex) {
throw new AssertionError(ex);
} catch (ExecutionException ex) {
throw new AssertionError(ex);
} catch (TimeoutException ex) {
throw new AssertionError(ex);
}
}
@Test
public void cancelRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final Future<?> f = Single.never().toFuture();
Runnable r = new Runnable() {
@Override
public void run() {
f.cancel(true);
}
};
TestHelper.race(r, r);
}
}
@Test
public void timeout() throws Exception {
Future<?> f = Single.never().toFuture();
try {
f.get(100, TimeUnit.MILLISECONDS);
fail("Should have thrown");
} catch (TimeoutException expected) {
assertEquals(timeoutMessage(100, TimeUnit.MILLISECONDS), expected.getMessage());
}
}
@Test
public void dispose() {
Future<Integer> f = Single.just(1).toFuture();
((Disposable)f).dispose();
assertTrue(((Disposable)f).isDisposed());
}
@Test
public void errorGetWithTimeout() throws Exception {
Future<?> f = Single.error(new TestException()).toFuture();
try {
f.get(5, TimeUnit.SECONDS);
fail("Should have thrown");
} catch (ExecutionException ex) {
assertTrue(ex.toString(), ex.getCause() instanceof TestException);
}
}
@Test
public void normalGetWitHTimeout() throws Exception {
Future<Integer> f = Single.just(1).toFuture();
assertEquals(1, f.get(5, TimeUnit.SECONDS).intValue());
}
@Test
public void getAwait() throws Exception {
Future<Integer> f = Single.just(1).delay(100, TimeUnit.MILLISECONDS).toFuture();
assertEquals(1, f.get(5, TimeUnit.SECONDS).intValue());
}
@Test
public void onSuccessCancelRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final PublishSubject<Integer> ps = PublishSubject.create();
final Future<?> f = ps.single(-99).toFuture();
ps.onNext(1);
Runnable r1 = new Runnable() {
@Override
public void run() {
f.cancel(true);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ps.onComplete();
}
};
TestHelper.race(r1, r2);
}
}
@Test
public void onErrorCancelRace() {
RxJavaPlugins.setErrorHandler(Functions.emptyConsumer());
try {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final PublishSubject<Integer> ps = PublishSubject.create();
final Future<?> f = ps.single(-99).toFuture();
final TestException ex = new TestException();
Runnable r1 = new Runnable() {
@Override
public void run() {
f.cancel(true);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ps.onError(ex);
}
};
TestHelper.race(r1, r2);
}
} finally {
RxJavaPlugins.reset();
}
}
}
| FutureSingleObserverTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/YodaConditionTest.java | {
"start": 5897,
"end": 6146
} | class ____ {
boolean yoda(E a) {
return E.A.equals(a);
}
}
""")
.addOutputLines(
"Test.java",
"""
import java.util.Objects;
| Test |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/event/spi/PreLoadEventListener.java | {
"start": 251,
"end": 323
} | interface ____ {
void onPreLoad(PreLoadEvent event);
}
| PreLoadEventListener |
java | spring-projects__spring-framework | spring-core/src/main/java24/org/springframework/core/type/classreading/MetadataReaderFactoryDelegate.java | {
"start": 1024,
"end": 1346
} | class ____ {
static MetadataReaderFactory create(@Nullable ResourceLoader resourceLoader) {
return new ClassFileMetadataReaderFactory(resourceLoader);
}
static MetadataReaderFactory create(@Nullable ClassLoader classLoader) {
return new ClassFileMetadataReaderFactory(classLoader);
}
}
| MetadataReaderFactoryDelegate |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java | {
"start": 5036,
"end": 10722
} | class ____ extends ArrayList<User> {
private long total = 0;
UserCounts(int capacity) {
super(capacity);
}
@Override
public boolean add(User user) {
long count = user.getCount();
int i = indexOf(user);
if (i == -1) {
super.add(new User(user.getUser(), count));
} else {
get(i).add(count);
}
total += count;
return true;
}
@Override
public boolean addAll(Collection<? extends User> users) {
users.forEach(user -> add(user));
return true;
}
public long getTotal() {
return total;
}
}
/**
* A mapping from each reported metric to its {@link RollingWindowMap} that
* maintains the set of {@link RollingWindow}s for the users that have
* operated on that metric.
*/
public ConcurrentHashMap<String, RollingWindowMap> metricMap =
new ConcurrentHashMap<>();
public RollingWindowManager(Configuration conf, int reportingPeriodMs) {
windowLenMs = reportingPeriodMs;
bucketsPerWindow =
conf.getInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY,
DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_DEFAULT);
Preconditions.checkArgument(bucketsPerWindow > 0,
"a window should have at least one bucket");
Preconditions.checkArgument(bucketsPerWindow <= windowLenMs,
"the minimum size of a bucket is 1 ms");
//same-size buckets
Preconditions.checkArgument(windowLenMs % bucketsPerWindow == 0,
"window size must be a multiplication of number of buckets");
topUsersCnt =
conf.getInt(DFSConfigKeys.NNTOP_NUM_USERS_KEY,
DFSConfigKeys.NNTOP_NUM_USERS_DEFAULT);
Preconditions.checkArgument(topUsersCnt > 0,
"the number of requested top users must be at least 1");
}
/**
* Called when the metric command is changed by "delta" units at time "time"
* via user "user"
*
* @param time the time of the event
* @param command the metric that is updated, e.g., the operation name
* @param user the user that updated the metric
* @param delta the amount of change in the metric, e.g., +1
*/
public void recordMetric(long time, String command,
String user, long delta) {
RollingWindow window = getRollingWindow(command, user);
window.incAt(time, delta);
}
/**
* Take a snapshot of current top users in the past period.
*
* @param time the current time
* @return a TopWindow describing the top users for each metric in the
* window.
*/
public TopWindow snapshot(long time) {
TopWindow window = new TopWindow(windowLenMs);
Set<String> metricNames = metricMap.keySet();
LOG.debug("iterating in reported metrics, size={} values={}",
metricNames.size(), metricNames);
UserCounts totalCounts = new UserCounts(metricMap.size());
for (Map.Entry<String, RollingWindowMap> entry : metricMap.entrySet()) {
String metricName = entry.getKey();
RollingWindowMap rollingWindows = entry.getValue();
UserCounts topN = getTopUsersForMetric(time, metricName, rollingWindows);
if (!topN.isEmpty()) {
window.addOp(new Op(metricName, topN, topUsersCnt));
totalCounts.addAll(topN);
}
}
// synthesize the overall total op count with the top users for every op.
Set<User> topUsers = new HashSet<>();
for (Op op : window.getOps()) {
topUsers.addAll(op.getTopUsers());
}
// intersect totals with the top users.
totalCounts.retainAll(topUsers);
// allowed to exceed the per-op topUsersCnt to capture total ops for
// any user
window.addOp(new Op(TopConf.ALL_CMDS, totalCounts, Integer.MAX_VALUE));
return window;
}
/**
* Calculates the top N users over a time interval.
*
* @param time the current time
* @param metricName Name of metric
* @return
*/
private UserCounts getTopUsersForMetric(long time, String metricName,
RollingWindowMap rollingWindows) {
UserCounts topN = new UserCounts(topUsersCnt);
Iterator<Map.Entry<String, RollingWindow>> iterator =
rollingWindows.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, RollingWindow> entry = iterator.next();
String userName = entry.getKey();
RollingWindow aWindow = entry.getValue();
long windowSum = aWindow.getSum(time);
// do the gc here
if (windowSum == 0) {
LOG.debug("gc window of metric: {} userName: {}",
metricName, userName);
iterator.remove();
continue;
}
LOG.debug("offer window of metric: {} userName: {} sum: {}",
metricName, userName, windowSum);
topN.add(new User(userName, windowSum));
}
LOG.debug("topN users size for command {} is: {}",
metricName, topN.size());
return topN;
}
/**
* Get the rolling window specified by metric and user.
*
* @param metric the updated metric
* @param user the user that updated the metric
* @return the rolling window
*/
private RollingWindow getRollingWindow(String metric, String user) {
RollingWindowMap rwMap = metricMap.get(metric);
if (rwMap == null) {
rwMap = new RollingWindowMap();
RollingWindowMap prevRwMap = metricMap.putIfAbsent(metric, rwMap);
if (prevRwMap != null) {
rwMap = prevRwMap;
}
}
RollingWindow window = rwMap.get(user);
if (window != null) {
return window;
}
window = new RollingWindow(windowLenMs, bucketsPerWindow);
RollingWindow prevWindow = rwMap.putIfAbsent(user, window);
if (prevWindow != null) {
window = prevWindow;
}
return window;
}
}
| UserCounts |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/referencearray/AtomicReferenceArrayAssert_isNotEmpty_Test.java | {
"start": 834,
"end": 1174
} | class ____ extends AtomicReferenceArrayAssertBaseTest {
@Override
protected AtomicReferenceArrayAssert<Object> invoke_api_method() {
return assertions.isNotEmpty();
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertNotEmpty(info(), internalArray());
}
}
| AtomicReferenceArrayAssert_isNotEmpty_Test |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/modelsize/EnsembleSizeInfoTests.java | {
"start": 1026,
"end": 3640
} | class ____ extends SizeEstimatorTestCase<EnsembleSizeInfo, EnsembleInferenceModel> {
static EnsembleSizeInfo createRandom() {
return new EnsembleSizeInfo(
Stream.generate(TreeSizeInfoTests::createRandom).limit(randomIntBetween(1, 100)).collect(Collectors.toList()),
randomIntBetween(1, 10000),
Stream.generate(() -> randomIntBetween(1, 10)).limit(randomIntBetween(1, 10)).collect(Collectors.toList()),
randomIntBetween(0, 10),
randomIntBetween(0, 10),
randomIntBetween(0, 10)
);
}
static EnsembleSizeInfo translateToEstimate(EnsembleInferenceModel ensemble) {
TreeInferenceModel tree = (TreeInferenceModel) ensemble.getModels().get(0);
int numClasses = Arrays.stream(tree.getNodes())
.filter(TreeInferenceModel.Node::isLeaf)
.map(n -> (TreeInferenceModel.LeafNode) n)
.findFirst()
.get()
.getLeafValue().length;
return new EnsembleSizeInfo(
ensemble.getModels()
.stream()
.map(m -> TreeSizeInfoTests.translateToEstimate((TreeInferenceModel) m))
.collect(Collectors.toList()),
randomIntBetween(0, 10),
Arrays.stream(ensemble.getFeatureNames()).map(String::length).collect(Collectors.toList()),
ensemble.getOutputAggregator().expectedValueSize() == null ? 0 : ensemble.getOutputAggregator().expectedValueSize(),
ensemble.getClassificationWeights() == null ? 0 : ensemble.getClassificationWeights().length,
numClasses
);
}
@Override
protected EnsembleSizeInfo createTestInstance() {
return createRandom();
}
@Override
protected EnsembleSizeInfo doParseInstance(XContentParser parser) {
return EnsembleSizeInfo.fromXContent(parser);
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
@Override
EnsembleInferenceModel generateTrueObject() {
try {
Ensemble model = EnsembleTests.createRandom();
EnsembleInferenceModel inferenceModel = EnsembleInferenceModelTests.serializeFromTrainedModel(model);
inferenceModel.rewriteFeatureIndices(Collections.emptyMap());
return inferenceModel;
} catch (IOException ex) {
throw new ElasticsearchException(ex);
}
}
@Override
EnsembleSizeInfo translateObject(EnsembleInferenceModel originalObject) {
return translateToEstimate(originalObject);
}
}
| EnsembleSizeInfoTests |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4072InactiveProfileReposTest.java | {
"start": 1040,
"end": 2230
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that repositories from inactive profiles are actually not used for artifact resolution.
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-4072");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteArtifacts("org.apache.maven.its.mng4072");
verifier.filterFile("pom-template.xml", "pom.xml");
verifier.filterFile("profiles-template.xml", "profiles.xml");
verifier.filterFile("settings-template.xml", "settings.xml");
verifier.addCliArgument("--settings");
verifier.addCliArgument("settings.xml");
try {
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
fail("Dependency resolution succeeded although all profiles are inactive");
} catch (Exception e) {
// expected, all profiles are inactive, hence the repos inaccessible
}
}
}
| MavenITmng4072InactiveProfileReposTest |
java | google__truth | core/src/test/java/com/google/common/truth/OptionalIntSubjectTest.java | {
"start": 1013,
"end": 2932
} | class ____ {
@Test
public void failOnNullSubject() {
AssertionError e = expectFailure(whenTesting -> whenTesting.that((OptionalInt) null).isEmpty());
assertThat(e).factKeys().containsExactly("expected empty optional", "but was").inOrder();
}
@Test
public void isPresent() {
assertThat(OptionalInt.of(1337)).isPresent();
}
@Test
public void isPresentFailing() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(OptionalInt.empty()).isPresent());
assertThat(e).factKeys().containsExactly("expected to be present");
}
@Test
public void isEmpty() {
assertThat(OptionalInt.empty()).isEmpty();
}
@Test
public void isEmptyFailing() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(OptionalInt.of(1337)).isEmpty());
assertThat(e).factKeys().contains("expected to be empty");
assertThat(e).factValue("but was present with value").isEqualTo("1337");
}
@Test
public void isEmptyFailingNull() {
AssertionError e = expectFailure(whenTesting -> whenTesting.that((OptionalInt) null).isEmpty());
assertThat(e).factKeys().containsExactly("expected empty optional", "but was").inOrder();
}
@Test
public void hasValue() {
assertThat(OptionalInt.of(1337)).hasValue(1337);
}
@Test
public void hasValue_failingWithEmpty() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(OptionalInt.empty()).hasValue(1337));
assertThat(e).factKeys().containsExactly("expected to have value", "but was absent").inOrder();
assertThat(e).factValue("expected to have value").isEqualTo("1337");
}
@Test
public void hasValue_failingWithWrongValue() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(OptionalInt.of(1337)).hasValue(42));
assertThat(e).factValue("value of").isEqualTo("optionalInt.getAsInt()");
}
}
| OptionalIntSubjectTest |
java | quarkusio__quarkus | extensions/tls-registry/deployment/src/test/java/io/quarkus/tls/KeyStoreWithSniAndAliasSetTest.java | {
"start": 961,
"end": 1857
} | class ____ {
private static final String configuration = """
quarkus.tls.key-store.p12.path=target/certs/test-sni-p12-keystore.p12
quarkus.tls.key-store.p12.password=sni
quarkus.tls.key-store.p12.alias-password=sni
quarkus.tls.key-store.p12.alias=sni-1
quarkus.tls.key-store.sni=true
""";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.add(new StringAsset(configuration), "application.properties"))
.assertException(t -> assertThat(t).hasMessageContaining("alias", "sni"));
@Test
void test() throws KeyStoreException {
fail("Should not be called as the deployment should fail due to the alias in the configuration.");
}
}
| KeyStoreWithSniAndAliasSetTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java | {
"start": 2017,
"end": 2344
} | class ____ {
public static final Logger LOG =
LoggerFactory.getLogger(TestHarFileSystem.class);
/**
* FileSystem methods that must not be overwritten by
* {@link HarFileSystem}. Either because there is a default implementation
* already available or because it is not relevant.
*/
private | TestHarFileSystem |
java | spring-projects__spring-boot | core/spring-boot-test/src/test/java/org/springframework/boot/test/context/ImportsContextCustomizerTests.java | {
"start": 5102,
"end": 5185
} | class ____ {
}
@AliasedImport(FirstImportedClass.class)
static | SecondImportedClass |
java | google__dagger | javatests/dagger/internal/codegen/DuplicateBindingsValidationTest.java | {
"start": 29813,
"end": 30231
} | interface ____ {",
" B build();",
" }",
"}");
Source cComponent =
CompilerTests.javaSource(
"test.C",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"import dagger.Subcomponent;",
"",
"@Subcomponent(modules = C.CModule.class)",
" | Builder |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/testresources/TestAddress.java | {
"start": 717,
"end": 1085
} | class ____{
private String street;
private List<String> crossStreets;
public String getStreet() {
return street;
}
public void setStreet(String street) {
this.street = street;
}
public List<String> getCrossStreets() {
return crossStreets;
}
public void setCrossStreets(List<String> crossStreets) {
this.crossStreets = crossStreets;
}
}
| TestAddress |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/subclassmapping/abstractsuperclass/VehicleCollection.java | {
"start": 280,
"end": 465
} | class ____ {
private Collection<AbstractVehicle> vehicles = new ArrayList<>();
public Collection<AbstractVehicle> getVehicles() {
return vehicles;
}
}
| VehicleCollection |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java | {
"start": 1768,
"end": 4847
} | class ____ {
public static File getFile(FsDatasetSpi<?> fsd, String bpid, long bid) {
ReplicaInfo r;
try {
r = ((FsDatasetImpl)fsd).getReplicaInfo(bpid, bid);
return new File(r.getBlockURI());
} catch (ReplicaNotFoundException e) {
FsDatasetImpl.LOG.warn(String.format(
"Replica with id %d was not found in block pool %s.", bid, bpid), e);
}
return null;
}
public static File getBlockFile(FsDatasetSpi<?> fsd, String bpid, Block b
) throws IOException {
ReplicaInfo r = ((FsDatasetImpl)fsd).getReplicaInfo(bpid, b.getBlockId());
return new File(r.getBlockURI());
}
public static File getMetaFile(FsDatasetSpi<?> fsd, String bpid, Block b)
throws IOException {
return FsDatasetUtil.getMetaFile(getBlockFile(fsd, bpid, b), b
.getGenerationStamp());
}
public static boolean breakHardlinksIfNeeded(FsDatasetSpi<?> fsd,
ExtendedBlock block) throws IOException {
final LocalReplica info =
(LocalReplica) ((FsDatasetImpl)fsd).getReplicaInfo(block);
return info.breakHardLinksIfNeeded();
}
public static ReplicaInfo fetchReplicaInfo (final FsDatasetSpi<?> fsd,
final String bpid, final long blockId) {
return ((FsDatasetImpl)fsd).fetchReplicaInfo(bpid, blockId);
}
public static Collection<ReplicaInfo> getReplicas(FsDatasetSpi<?> fsd,
String bpid) {
return ((FsDatasetImpl)fsd).volumeMap.replicas(bpid);
}
/**
* Stop the lazy writer daemon that saves RAM disk files to persistent storage.
* @param dn
*/
public static void stopLazyWriter(DataNode dn) {
FsDatasetImpl fsDataset = ((FsDatasetImpl) dn.getFSDataset());
((FsDatasetImpl.LazyWriter) fsDataset.lazyWriter.getRunnable()).stop();
}
/**
* Asserts that the storage lock file in the given directory has been
* released. This method works by trying to acquire the lock file itself. If
* locking fails here, then the main code must have failed to release it.
*
* @param dir the storage directory to check
* @throws IOException if there is an unexpected I/O error
*/
public static void assertFileLockReleased(String dir) throws IOException {
StorageLocation sl = StorageLocation.parse(dir);
File lockFile = new File(new File(sl.getUri()), Storage.STORAGE_FILE_LOCK);
try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
FileChannel channel = raf.getChannel()) {
FileLock lock = channel.tryLock();
assertNotNull(lock, String.format(
"Lock file at %s appears to be held by a different process.",
lockFile.getAbsolutePath()));
if (lock != null) {
try {
lock.release();
} catch (IOException e) {
FsDatasetImpl.LOG.warn(String.format("I/O error releasing file lock %s.",
lockFile.getAbsolutePath()), e);
throw e;
}
}
} catch (OverlappingFileLockException e) {
fail(String.format("Must release lock file at %s.",
lockFile.getAbsolutePath()));
}
}
}
| FsDatasetTestUtil |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bootstrap/binding/annotations/embedded/Deal.java | {
"start": 340,
"end": 666
} | class ____ {
/**
* Deal ID.
*/
private String id;
@Id
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
/**
* Swap with the tenor.
*/
private Swap swap;
@Embedded
public Swap getSwap() {
return swap;
}
public void setSwap(Swap swap) {
this.swap = swap;
}
}
| Deal |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeotileFromStringEvaluator.java | {
"start": 1143,
"end": 4585
} | class ____ extends AbstractConvertFunction.AbstractEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ToGeotileFromStringEvaluator.class);
private final EvalOperator.ExpressionEvaluator in;
public ToGeotileFromStringEvaluator(Source source, EvalOperator.ExpressionEvaluator in,
DriverContext driverContext) {
super(driverContext, source);
this.in = in;
}
@Override
public EvalOperator.ExpressionEvaluator next() {
return in;
}
@Override
public Block evalVector(Vector v) {
BytesRefVector vector = (BytesRefVector) v;
int positionCount = v.getPositionCount();
BytesRef scratchPad = new BytesRef();
if (vector.isConstant()) {
try {
return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0, scratchPad), positionCount);
} catch (IllegalArgumentException e) {
registerException(e);
return driverContext.blockFactory().newConstantNullBlock(positionCount);
}
}
try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) {
for (int p = 0; p < positionCount; p++) {
try {
builder.appendLong(evalValue(vector, p, scratchPad));
} catch (IllegalArgumentException e) {
registerException(e);
builder.appendNull();
}
}
return builder.build();
}
}
private long evalValue(BytesRefVector container, int index, BytesRef scratchPad) {
BytesRef value = container.getBytesRef(index, scratchPad);
return ToGeotile.fromString(value);
}
@Override
public Block evalBlock(Block b) {
BytesRefBlock block = (BytesRefBlock) b;
int positionCount = block.getPositionCount();
try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) {
BytesRef scratchPad = new BytesRef();
for (int p = 0; p < positionCount; p++) {
int valueCount = block.getValueCount(p);
int start = block.getFirstValueIndex(p);
int end = start + valueCount;
boolean positionOpened = false;
boolean valuesAppended = false;
for (int i = start; i < end; i++) {
try {
long value = evalValue(block, i, scratchPad);
if (positionOpened == false && valueCount > 1) {
builder.beginPositionEntry();
positionOpened = true;
}
builder.appendLong(value);
valuesAppended = true;
} catch (IllegalArgumentException e) {
registerException(e);
}
}
if (valuesAppended == false) {
builder.appendNull();
} else if (positionOpened) {
builder.endPositionEntry();
}
}
return builder.build();
}
}
private long evalValue(BytesRefBlock container, int index, BytesRef scratchPad) {
BytesRef value = container.getBytesRef(index, scratchPad);
return ToGeotile.fromString(value);
}
@Override
public String toString() {
return "ToGeotileFromStringEvaluator[" + "in=" + in + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(in);
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += in.baseRamBytesUsed();
return baseRamBytesUsed;
}
public static | ToGeotileFromStringEvaluator |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/objectarrays/ObjectArrays_assertAreAtLeast_Test.java | {
"start": 1136,
"end": 2096
} | class ____ extends ObjectArraysWithConditionBaseTest {
@Test
void should_pass_if_satisfies_at_least_times_condition() {
arrays.assertAreAtLeast(INFO, array("Yoda", "Luke", "Leia"), 2, jedi);
}
@Test
void should_pass_if_all_satisfies_condition_() {
arrays.assertAreAtLeast(INFO, array("Yoda", "Luke", "Obiwan"), 2, jedi);
}
@Test
void should_throw_error_if_condition_is_null() {
assertThatNullPointerException().isThrownBy(() -> arrays.assertAreAtLeast(INFO, array("Yoda", "Luke"), 2, null))
.withMessage("The condition to evaluate should not be null");
}
@Test
void should_fail_if_condition_is_not_met() {
// GIVEN
var actual = array("Yoda", "Solo", "Leia");
// WHEN
expectAssertionError(() -> arrays.assertAreAtLeast(INFO, actual, 2, jedi));
// THEN
verify(failures).failure(INFO, elementsShouldBeAtLeast(actual, 2, jedi));
}
}
| ObjectArrays_assertAreAtLeast_Test |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/writing/ComponentImplementation.java | {
"start": 31153,
"end": 40093
} | class ____ for the component based on the given name. */
public String getUniqueMethodName(String name) {
return componentMethodNames.getUniqueName(name);
}
/** Returns a new, unique method name for a getter method for the given request. */
String getUniqueMethodName(BindingRequest request) {
return uniqueMethodName(request, KeyVariableNamer.name(request.key()));
}
@Override
public String getUniqueClassName(String name) {
return componentClassNames.getUniqueName(name);
}
private String uniqueMethodName(BindingRequest request, String bindingName) {
// This name is intentionally made to match the name for fields in fastInit
// in order to reduce the constant pool size. b/162004246
String baseMethodName =
bindingName
+ (request.isRequestKind(RequestKind.INSTANCE)
? ""
: UPPER_UNDERSCORE.to(UPPER_CAMEL, request.kindName()));
return getUniqueMethodName(baseMethodName);
}
/**
* Gets the parameter name to use for the given requirement for this component, starting with
* the given base name if no parameter name has already been selected for the requirement.
*/
public String getParameterName(ComponentRequirement requirement) {
return toJavaPoet(constructorParameters.get(requirement)).name;
}
/** Claims a new method name for the component. Does nothing if method name already exists. */
public void claimMethodName(CharSequence name) {
componentMethodNames.claim(name);
}
public boolean isShardClassPrivate() {
return modifiers().contains(PRIVATE);
}
@Override
public XTypeSpec generate() {
XTypeSpecs.Builder builder = XTypeSpecs.classBuilder(name);
// Ksp requires explicitly associating input classes that are generated with the output class,
// otherwise, the cached generated classes won't be discoverable in an incremental build.
if (processingEnv.getBackend() == XProcessingEnv.Backend.KSP) {
graph.componentDescriptor().modules().stream()
.filter(ModuleDescriptor::isImplicitlyIncluded)
.forEach(module -> builder.addOriginatingElement(module.moduleElement()));
}
if (isComponentShard()) {
builder.superType(graph.componentTypeElement());
addCreator();
addFactoryMethods();
addInterfaceMethods();
addChildComponents();
addShards();
}
addConstructorAndInitializationMethods();
if (graph.componentDescriptor().isProduction()) {
if (isComponentShard() || !cancellations.isEmpty()) {
builder.superType(processingEnv.requireTypeElement(XTypeNames.CANCELLATION_LISTENER));
addCancellationListenerImplementation();
}
}
modifiers().forEach(builder::addModifiers);
fieldSpecsMap.asMap().values().forEach(builder::addProperties);
methodSpecsMap.asMap().values().forEach(builder::addFunctions);
typeSpecsMap.asMap().values().forEach(builder::addTypes);
typeSuppliers.stream().map(Supplier::get).forEach(builder::addType);
if (!compilerOptions.generatedClassExtendsComponent()
&& isComponentShard()
&& graph.componentPath().atRoot()) {
topLevelImplementation().addType(TypeSpecKind.COMPONENT_IMPL, builder.build());
return topLevelImplementation().generate();
}
return builder.build();
}
private ImmutableSet<Modifier> modifiers() {
return isNested() || !isComponentShard()
? ImmutableSet.of(PRIVATE, STATIC, FINAL)
: graph.componentTypeElement().isPublic()
// TODO(ronshapiro): perhaps all generated components should be non-public?
? ImmutableSet.of(PUBLIC, FINAL)
: ImmutableSet.of(FINAL);
}
private void addCreator() {
componentCreatorImplementationFactoryProvider
.get()
.create()
.map(ComponentCreatorImplementation::spec)
.ifPresent(
creator -> topLevelImplementation().addType(TypeSpecKind.COMPONENT_CREATOR, creator));
}
private void addFactoryMethods() {
if (parent.isPresent()) {
graph.factoryMethod().ifPresent(this::createSubcomponentFactoryMethod);
} else {
createRootComponentFactoryMethod();
}
}
private void createRootComponentFactoryMethod() {
checkState(!parent.isPresent());
// Top-level components have a static method that returns a builder or factory for the
// component. If the user defined a @Component.Builder or @Component.Factory, an
// implementation of their type is returned. Otherwise, an autogenerated Builder type is
// returned.
// TODO(cgdecker): Replace this abomination with a small class?
// Better yet, change things so that an autogenerated builder type has a descriptor of sorts
// just like a user-defined creator type.
ComponentCreatorKind creatorKind;
XClassName creatorType;
String factoryMethodName;
boolean noArgFactoryMethod;
Optional<ComponentCreatorDescriptor> creatorDescriptor =
graph.componentDescriptor().creatorDescriptor();
if (creatorDescriptor.isPresent()) {
ComponentCreatorDescriptor descriptor = creatorDescriptor.get();
creatorKind = descriptor.kind();
creatorType = descriptor.typeElement().asClassName();
factoryMethodName = getSimpleName(descriptor.factoryMethod());
noArgFactoryMethod = descriptor.factoryParameters().isEmpty();
} else {
creatorKind = BUILDER;
creatorType = getCreatorName();
factoryMethodName = "build";
noArgFactoryMethod = true;
}
validateMethodNameDoesNotOverrideGeneratedCreator(creatorKind.methodName());
claimMethodName(creatorKind.methodName());
topLevelImplementation()
.addMethod(
MethodSpecKind.BUILDER_METHOD,
XFunSpecs.methodBuilder(creatorKind.methodName())
.addModifiers(PUBLIC, STATIC)
.returns(creatorType)
.addStatement("return %L", XCodeBlock.ofNewInstance(getCreatorName(), ""))
.build());
if (noArgFactoryMethod && canInstantiateAllRequirements()) {
validateMethodNameDoesNotOverrideGeneratedCreator("create");
claimMethodName("create");
topLevelImplementation()
.addMethod(
MethodSpecKind.BUILDER_METHOD,
methodBuilder("create")
.returns(graph.componentTypeElement().asClassName())
.addModifiers(PUBLIC, STATIC)
.addStatement(
"return %L.%N()",
XCodeBlock.ofNewInstance(
topLevelImplementation().name().nestedClass(creatorKind.typeName()),
""),
factoryMethodName)
.build());
}
}
// TODO(bcorso): This can be removed once we delete generatedClassExtendsComponent flag.
private void validateMethodNameDoesNotOverrideGeneratedCreator(String creatorName) {
// Check if there is any client added method has the same signature as generated creatorName.
XTypeElements.getAllMethods(graph.componentTypeElement()).stream()
.filter(method -> getSimpleName(method).contentEquals(creatorName))
.filter(method -> method.getParameters().isEmpty())
.filter(method -> !method.isStatic())
.forEach(
(XMethodElement method) ->
messager.printMessage(
ERROR,
String.format(
"The method %s.%s() conflicts with a method of the same name Dagger is "
+ "trying to generate as a way to instantiate the component. Please "
+ "choose a different name for your method.",
method.getEnclosingElement().getClassName().canonicalName(),
getSimpleName(method))));
}
/** {@code true} if all of the graph's required dependencies can be automatically constructed */
private boolean canInstantiateAllRequirements() {
return !Iterables.any(
graph.componentRequirements(), ComponentRequirement::requiresAPassedInstance);
}
private void createSubcomponentFactoryMethod(XMethodElement factoryMethod) {
checkState(parent.isPresent());
XType parentType = parent.get().graph().componentTypeElement().getType();
XFunSpecs.Builder method = overriding(factoryMethod, parentType, compilerOptions);
// Use the parameter names from the overriding method, which may be different from the
// parameter names at the declaration site if it is pulled in as a | name |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/RequestScopedParamConverterTest.java | {
"start": 1555,
"end": 2022
} | class ____ {
public final String value;
public final String fooHeader;
public Model(String value, String fooHeader) {
this.value = value;
this.fooHeader = fooHeader;
}
// called automatically by RR based on the JAX-RS convention
public static Model valueOf(String value) {
return new Model(value, (String) CurrentRequestManager.get().getHeader("foo", true));
}
}
}
| Model |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/proxy/AuditedCollectionProxyTest.java | {
"start": 900,
"end": 2190
} | class ____ {
Integer id_ListRefEdEntity1;
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) {
ListRefEdEntity listReferencedEntity1 = new ListRefEdEntity(
Integer.valueOf( 1 ), "str1"
);
ListRefIngEntity refingEntity1 = new ListRefIngEntity(
Integer.valueOf( 1 ), "refing1", listReferencedEntity1
);
// Revision 1
scope.inTransaction( em -> {
em.persist( listReferencedEntity1 );
em.persist( refingEntity1 );
} );
id_ListRefEdEntity1 = listReferencedEntity1.getId();
// Revision 2
ListRefIngEntity refingEntity2 = new ListRefIngEntity(
Integer.valueOf( 2 ), "refing2", listReferencedEntity1
);
scope.inTransaction( em -> {
em.persist( refingEntity2 );
} );
}
@Test
public void testProxyIdentifier(EntityManagerFactoryScope scope) {
scope.inTransaction( em -> {
ListRefEdEntity listReferencedEntity1 = em.getReference(
ListRefEdEntity.class, id_ListRefEdEntity1
);
assertInstanceOf( HibernateProxy.class, listReferencedEntity1 );
// Revision 3
ListRefIngEntity refingEntity3 = new ListRefIngEntity(
Integer.valueOf( 3 ), "refing3", listReferencedEntity1
);
em.persist( refingEntity3 );
listReferencedEntity1.getReffering().size();
} );
}
}
| AuditedCollectionProxyTest |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/updatemethods/selection/ExternalHandWrittenMapper.java | {
"start": 681,
"end": 1778
} | class ____ {
public void toDepartmentEntity(DepartmentDto dto, @MappingTarget DepartmentEntity entity) {
if ( entity != null && dto != null ) {
entity.setName( dto.getName() );
}
}
public void toEmployeeEntityList(List<EmployeeDto> dtos, @MappingTarget List<EmployeeEntity> entities) {
if ( entities != null && dtos != null ) {
for ( EmployeeDto dto : dtos) {
entities.add( DepartmentMapper.INSTANCE.toEmployeeEntity( dto ) );
}
}
}
public void toSecretaryEmployeeEntityMap(Map<SecretaryDto, EmployeeDto> dtoMap,
@MappingTarget Map<SecretaryEntity, EmployeeEntity> entityMap) {
if ( entityMap != null && dtoMap != null ) {
for ( Map.Entry<SecretaryDto, EmployeeDto> dtoEntry : dtoMap.entrySet() ) {
entityMap.put(
DepartmentMapper.INSTANCE.toSecretaryEntity( dtoEntry.getKey() ),
DepartmentMapper.INSTANCE.toEmployeeEntity( dtoEntry.getValue() ) );
}
}
}
}
| ExternalHandWrittenMapper |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java | {
"start": 32529,
"end": 38132
} | class ____ {
public String previousSaslMechanism;
public KafkaPrincipal previousKafkaPrincipal;
public long reauthenticationBeginNanos;
public Long sessionExpirationTimeNanos;
public boolean connectedClientSupportsReauthentication;
public long authenticationEndNanos;
public String badMechanismErrorMessage;
public void reauthenticating(String previousSaslMechanism, KafkaPrincipal previousKafkaPrincipal,
long reauthenticationBeginNanos) {
this.previousSaslMechanism = Objects.requireNonNull(previousSaslMechanism);
this.previousKafkaPrincipal = Objects.requireNonNull(previousKafkaPrincipal);
this.reauthenticationBeginNanos = reauthenticationBeginNanos;
}
public boolean reauthenticating() {
return previousSaslMechanism != null;
}
public String authenticationOrReauthenticationText() {
return reauthenticating() ? "re-authentication" : "authentication";
}
public void ensurePrincipalUnchanged(KafkaPrincipal reauthenticatedKafkaPrincipal) throws SaslAuthenticationException {
if (!previousKafkaPrincipal.equals(reauthenticatedKafkaPrincipal)) {
throw new SaslAuthenticationException(String.format(
"Cannot change principals during re-authentication from %s.%s: %s.%s",
previousKafkaPrincipal.getPrincipalType(), previousKafkaPrincipal.getName(),
reauthenticatedKafkaPrincipal.getPrincipalType(), reauthenticatedKafkaPrincipal.getName()));
}
}
/*
* We define the REAUTH_BAD_MECHANISM state because the failed re-authentication
* metric does not get updated if we send back an error immediately upon the
* start of re-authentication.
*/
public boolean saslMechanismUnchanged(String clientMechanism) {
if (previousSaslMechanism.equals(clientMechanism))
return true;
badMechanismErrorMessage = String.format(
"SASL mechanism '%s' requested by client is not supported for re-authentication of mechanism '%s'",
clientMechanism, previousSaslMechanism);
LOG.debug(badMechanismErrorMessage);
setSaslState(SaslState.REAUTH_BAD_MECHANISM);
return false;
}
private long calcCompletionTimesAndReturnSessionLifetimeMs() {
long retvalSessionLifetimeMs = 0L;
long authenticationEndMs = time.milliseconds();
authenticationEndNanos = time.nanoseconds();
Long credentialExpirationMs = (Long) saslServer
.getNegotiatedProperty(SaslInternalConfigs.CREDENTIAL_LIFETIME_MS_SASL_NEGOTIATED_PROPERTY_KEY);
Long connectionsMaxReauthMs = connectionsMaxReauthMsByMechanism.get(saslMechanism);
boolean maxReauthSet = connectionsMaxReauthMs != null && connectionsMaxReauthMs > 0;
if (credentialExpirationMs != null || maxReauthSet) {
if (credentialExpirationMs == null)
retvalSessionLifetimeMs = zeroIfNegative(connectionsMaxReauthMs);
else if (!maxReauthSet)
retvalSessionLifetimeMs = zeroIfNegative(credentialExpirationMs - authenticationEndMs);
else
retvalSessionLifetimeMs = zeroIfNegative(Math.min(credentialExpirationMs - authenticationEndMs, connectionsMaxReauthMs));
sessionExpirationTimeNanos = Math.addExact(authenticationEndNanos, Utils.msToNs(retvalSessionLifetimeMs));
}
if (credentialExpirationMs != null) {
LOG.debug(
"Authentication complete; session max lifetime from broker config={} ms, credential expiration={} ({} ms); session expiration = {} ({} ms), sending {} ms to client",
connectionsMaxReauthMs, new Date(credentialExpirationMs),
credentialExpirationMs - authenticationEndMs,
new Date(authenticationEndMs + retvalSessionLifetimeMs), retvalSessionLifetimeMs,
retvalSessionLifetimeMs);
} else {
if (sessionExpirationTimeNanos != null)
LOG.debug(
"Authentication complete; session max lifetime from broker config={} ms, no credential expiration; session expiration = {} ({} ms), sending {} ms to client",
connectionsMaxReauthMs, new Date(authenticationEndMs + retvalSessionLifetimeMs),
retvalSessionLifetimeMs, retvalSessionLifetimeMs);
else
LOG.debug(
"Authentication complete; session max lifetime from broker config={} ms, no credential expiration; no session expiration, sending 0 ms to client",
connectionsMaxReauthMs);
}
return retvalSessionLifetimeMs;
}
public Long reauthenticationLatencyMs() {
if (!reauthenticating())
return null;
// record at least 1 ms if there is some latency
long latencyNanos = authenticationEndNanos - reauthenticationBeginNanos;
return latencyNanos == 0L ? 0L : Math.max(1L, Math.round(latencyNanos / 1000.0 / 1000.0));
}
private long zeroIfNegative(long value) {
return Math.max(0L, value);
}
}
}
| ReauthInfo |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy/runtime/src/main/java/io/quarkus/resteasy/runtime/standalone/VertxHttpResponse.java | {
"start": 769,
"end": 6891
} | class ____ implements HttpResponse {
private int status = 200;
private OutputStream os;
private MultivaluedMap<String, Object> outputHeaders;
final HttpServerRequest request;
final HttpServerResponse response;
private boolean committed;
private boolean finished;
private ResteasyProviderFactory providerFactory;
private final HttpMethod method;
private final VertxOutput output;
private final RoutingContext routingContext;
public VertxHttpResponse(HttpServerRequest request, ResteasyProviderFactory providerFactory,
final HttpMethod method, BufferAllocator allocator, VertxOutput output, RoutingContext routingContext) {
this.routingContext = routingContext;
outputHeaders = new MultivaluedHashMap<String, Object>();
this.method = method;
os = (method == null || !method.equals(HttpMethod.HEAD)) ? new VertxOutputStream(this, allocator)
: null;
this.request = request;
this.response = request.response();
this.providerFactory = providerFactory;
this.output = output;
}
@Override
public void setOutputStream(OutputStream os) {
this.os = os;
}
@Override
public int getStatus() {
return status;
}
@Override
public void setStatus(int status) {
this.status = status;
}
@Override
public MultivaluedMap<String, Object> getOutputHeaders() {
return outputHeaders;
}
@Override
public OutputStream getOutputStream() throws IOException {
return os;
}
@Override
public void addNewCookie(NewCookie cookie) {
outputHeaders.add(jakarta.ws.rs.core.HttpHeaders.SET_COOKIE, cookie);
}
void checkException() throws IOException {
// todo from old code, do we still need it?
}
@Override
public void sendError(int status) throws IOException {
checkException();
sendError(status, null);
}
@Override
public void sendError(int status, String message) throws IOException {
checkException();
if (committed) {
throw new IllegalStateException();
}
response.setStatusCode(status);
if (message != null) {
response.end(message);
} else {
response.end();
}
committed = true;
}
@Override
public boolean isCommitted() {
return committed;
}
@Override
public void reset() {
if (committed) {
throw new IllegalStateException("Response already committed");
}
outputHeaders.clear();
}
private void transformHeaders() {
getOutputHeaders().forEach(this::transformHeadersList);
}
private void transformHeadersList(final String key, final List<Object> valueList) {
final MultiMap headers = response.headers();
for (Object value : valueList) {
if (value == null) {
headers.add(key, "");
} else {
RuntimeDelegate.HeaderDelegate delegate = providerFactory.getHeaderDelegate(value.getClass());
if (delegate != null) {
headers.add(key, delegate.toString(value));
} else {
headers.add(key, value.toString());
}
}
}
}
public void finish() throws IOException {
checkException();
if (finished || response.ended() || response.closed()) {
if (os != null) {
try {
os.close();
os = null;
} catch (Exception ignored) {
}
}
return;
}
try {
if (os != null) {
os.close(); // this will end() vertx response
} else {
committed = true;
response.setStatusCode(getStatus());
transformHeaders();
routingContext.addHeadersEndHandler(h -> {
response.headers().remove(HttpHeaders.CONTENT_LENGTH);
response.headers().set(HttpHeaders.CONNECTION, HttpHeaders.KEEP_ALIVE);
});
response.end();
}
} finally {
finished = true;
}
}
@Override
public void flushBuffer() throws IOException {
checkException();
if (os != null) {
os.flush();
}
}
public void writeBlocking(ByteBuf buffer, boolean finished) throws IOException {
checkException();
prepareWrite(buffer, finished);
output.write(buffer, finished);
}
public CompletionStage<Void> writeNonBlocking(ByteBuf buffer, boolean finished) {
try {
prepareWrite(buffer, finished);
} catch (IOException e) {
CompletableFuture<Void> ret = new CompletableFuture<>();
ret.completeExceptionally(e);
return ret;
}
return output.writeNonBlocking(buffer, finished);
}
private void prepareWrite(ByteBuf buffer, boolean finished) throws IOException {
if (!isCommitted()) {
committed = true;
response.setStatusCode(getStatus());
transformHeaders();
if (finished) {
boolean explicitChunked = "chunked".equalsIgnoreCase(response.headers().get("transfer-encoding"));
if (!explicitChunked) {
if (buffer == null) {
getOutputHeaders().putSingle(jakarta.ws.rs.core.HttpHeaders.CONTENT_LENGTH, "0");
} else {
getOutputHeaders().putSingle(jakarta.ws.rs.core.HttpHeaders.CONTENT_LENGTH,
"" + buffer.readableBytes());
}
}
} else if (!response.headers().contains(jakarta.ws.rs.core.HttpHeaders.CONTENT_LENGTH)) {
response.setChunked(true);
}
}
if (finished)
this.finished = true;
}
}
| VertxHttpResponse |
java | google__dagger | dagger-producers/main/java/dagger/producers/monitoring/internal/Monitors.java | {
"start": 7539,
"end": 8988
} | class ____ extends ProductionComponentMonitor.Factory {
private final ImmutableList<? extends ProductionComponentMonitor.Factory> delegates;
Factory(Iterable<? extends ProductionComponentMonitor.Factory> delegates) {
this.delegates = ImmutableList.copyOf(delegates);
}
@Override
public ProductionComponentMonitor create(Object component) {
ImmutableList.Builder<ProductionComponentMonitor> monitorsBuilder = ImmutableList.builder();
for (ProductionComponentMonitor.Factory delegate : delegates) {
try {
ProductionComponentMonitor monitor = delegate.create(component);
if (monitor != null) {
monitorsBuilder.add(monitor);
}
} catch (RuntimeException e) {
logCreateException(e, delegate, component);
}
}
ImmutableList<ProductionComponentMonitor> monitors = monitorsBuilder.build();
if (monitors.isEmpty()) {
return ProductionComponentMonitor.noOp();
} else if (monitors.size() == 1) {
return new NonThrowingProductionComponentMonitor(Iterables.getOnlyElement(monitors));
} else {
return new DelegatingProductionComponentMonitor(monitors);
}
}
}
}
/**
* A producer monitor that delegates to several monitors, and catches and logs all exceptions
* that the delegates throw.
*/
private static final | Factory |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/SinkV2MetricsITCase.java | {
"start": 13134,
"end": 14076
} | class ____ extends TestSinkV2.DefaultSinkWriter<Long> {
static final long BASE_SEND_TIME = 100;
static final long RECORD_SIZE_IN_BYTES = 10;
private SinkWriterMetricGroup metricGroup;
private long sendTime;
@Override
public void init(WriterInitContext context) {
this.metricGroup = context.metricGroup();
metricGroup.setCurrentSendTimeGauge(() -> sendTime);
}
@Override
public void write(Long element, Context context) {
super.write(element, context);
sendTime = element * BASE_SEND_TIME;
metricGroup.getIOMetricGroup().getNumRecordsOutCounter().inc();
if (element % 2 == 0) {
metricGroup.getNumRecordsOutErrorsCounter().inc();
}
metricGroup.getIOMetricGroup().getNumBytesOutCounter().inc(RECORD_SIZE_IN_BYTES);
}
}
private static | MetricWriter |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_3200/Issue3246.java | {
"start": 798,
"end": 2466
} | class ____ {
@JSONField(name = "d_id", ordinal = 0)
private String deviceId;
@JSONField(name = "c_id", ordinal = 1)
private Integer commodityId;
@JSONField(name = "o_$", ordinal = 2)
private Double orderPrice;
@JSONField(name = "am", ordinal = 3)
private Integer amount;
@JSONField(name = "$_tp", ordinal = 4)
private String payType;
@JSONField(name = "wx_p_id", ordinal = 5)
private Long productId;
@JSONField(name = "ext_p_id", ordinal = 6)
private Long extraProductId;
@JSONField(name = "u_id", ordinal = 7)
private String userId;
@JSONField(name = "p_id", ordinal = 8)
private Long parentId;
@JSONField(name = "o_t", ordinal = 9)
private Integer orderType;
@JSONField(name = "ts", ordinal = 10)
private Integer tradeStatus;
@JSONField(name = "pn", ordinal = 11)
private String phoneNum;
@JSONField(name = "conf_id", ordinal = 12)
private Long configId;
@JSONField(name = "sku_id", ordinal = 13)
private Long skuCommodityId;
@JSONField(name = "c_ids", ordinal = 14)
private String commodityIds;
@JSONField(name = "a_m", ordinal = 15)
private String addMoney;
@JSONField(name = "skr_id", ordinal = 15)
private Long secKillRecordId;
@JSONField(name = "c_n", ordinal = 16)
private String clientOrderNum;
@JSONField(name = "s_t", ordinal = 16)
private Integer sceneType;
@JSONField(name = "t_t", ordinal = 16)
private Integer tradingType;
}
}
| Order |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/state/OperatorStateBackendTest.java | {
"start": 14637,
"end": 17509
} | class ____ extends TypeSerializer<Integer> {
private static final long serialVersionUID = -5344563614550163898L;
private transient ClassLoader classLoader;
private transient AtomicInteger atomicInteger;
private VerifyingIntSerializer(ClassLoader classLoader, AtomicInteger atomicInteger) {
this.classLoader = Preconditions.checkNotNull(classLoader);
this.atomicInteger = Preconditions.checkNotNull(atomicInteger);
}
@Override
public boolean isImmutableType() {
// otherwise the copy method won't be called for the deepCopy operation
return false;
}
@Override
public TypeSerializer<Integer> duplicate() {
return this;
}
@Override
public Integer createInstance() {
return 0;
}
@Override
public Integer copy(Integer from) {
assertThat(classLoader).isEqualTo(Thread.currentThread().getContextClassLoader());
atomicInteger.incrementAndGet();
return IntSerializer.INSTANCE.copy(from);
}
@Override
public Integer copy(Integer from, Integer reuse) {
assertThat(classLoader).isEqualTo(Thread.currentThread().getContextClassLoader());
atomicInteger.incrementAndGet();
return IntSerializer.INSTANCE.copy(from, reuse);
}
@Override
public int getLength() {
return IntSerializer.INSTANCE.getLength();
}
@Override
public void serialize(Integer record, DataOutputView target) throws IOException {
IntSerializer.INSTANCE.serialize(record, target);
}
@Override
public Integer deserialize(DataInputView source) throws IOException {
return IntSerializer.INSTANCE.deserialize(source);
}
@Override
public Integer deserialize(Integer reuse, DataInputView source) throws IOException {
return IntSerializer.INSTANCE.deserialize(reuse, source);
}
@Override
public void copy(DataInputView source, DataOutputView target) throws IOException {
assertThat(classLoader).isEqualTo(Thread.currentThread().getContextClassLoader());
atomicInteger.incrementAndGet();
IntSerializer.INSTANCE.copy(source, target);
}
@Override
public boolean equals(Object obj) {
return obj instanceof VerifyingIntSerializer;
}
@Override
public int hashCode() {
return getClass().hashCode();
}
@Override
public TypeSerializerSnapshot<Integer> snapshotConfiguration() {
return new VerifyingIntSerializerSnapshot();
}
}
@SuppressWarnings("WeakerAccess")
public static | VerifyingIntSerializer |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/collection_in_constructor/Store7.java | {
"start": 771,
"end": 1850
} | class ____ {
private final Integer id;
private final List<String> aisleNames;
private final List<String> clerkNames;
public Store7(Integer id, List<String> aisleNames, List<String> clerkNames) {
super();
this.id = id;
this.aisleNames = aisleNames;
this.clerkNames = clerkNames;
}
public Integer getId() {
return id;
}
public List<String> getAisleNames() {
return aisleNames;
}
public List<String> getClerkNames() {
return clerkNames;
}
@Override
public int hashCode() {
return Objects.hash(clerkNames, id, aisleNames);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof Store7)) {
return false;
}
Store7 other = (Store7) obj;
return Objects.equals(clerkNames, other.clerkNames) && Objects.equals(id, other.id)
&& Objects.equals(aisleNames, other.aisleNames);
}
@Override
public String toString() {
return "Store7 [id=" + id + ", aisleNames=" + aisleNames + ", clerkNames=" + clerkNames + "]";
}
}
| Store7 |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/rbfbalance/TestRouterDistCpProcedure.java | {
"start": 2614,
"end": 5284
} | class ____ extends TestDistCpProcedure {
private static StateStoreDFSCluster cluster;
private static MiniRouterDFSCluster.RouterContext routerContext;
private static Configuration routerConf;
private static StateStoreService stateStore;
@BeforeAll
public static void globalSetUp() throws Exception {
cluster = new StateStoreDFSCluster(false, 1);
// Build and start a router with State Store + admin + RPC
Configuration conf = new RouterConfigBuilder()
.stateStore()
.admin()
.rpc()
.build();
cluster.addRouterOverrides(conf);
cluster.startRouters();
routerContext = cluster.getRandomRouter();
Router router = routerContext.getRouter();
stateStore = router.getStateStore();
// Add one name services for testing
ActiveNamenodeResolver membership = router.getNamenodeResolver();
membership.registerNamenode(createNamenodeReport("ns0", "nn1",
HAServiceProtocol.HAServiceState.ACTIVE));
stateStore.refreshCaches(true);
routerConf = new Configuration();
InetSocketAddress routerSocket = router.getAdminServerAddress();
routerConf.setSocketAddr(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
routerSocket);
}
@Override
public void testDisableWrite() throws Exception {
// Firstly add mount entry: /test-write->{ns0,/test-write}.
String mount = "/test-write";
MountTable newEntry = MountTable
.newInstance(mount, Collections.singletonMap("ns0", mount),
Time.now(), Time.now());
MountTableManager mountTable =
routerContext.getAdminClient().getMountTableManager();
AddMountTableEntryRequest addRequest =
AddMountTableEntryRequest.newInstance(newEntry);
AddMountTableEntryResponse addResponse =
mountTable.addMountTableEntry(addRequest);
assertTrue(addResponse.getStatus());
stateStore.loadCache(MountTableStoreImpl.class, true); // load cache.
// Construct client.
URI address = routerContext.getFileSystemURI();
DFSClient routerClient = new DFSClient(address, routerConf);
FedBalanceContext context = new FedBalanceContext
.Builder(null, null, mount, routerConf).build();
RouterDistCpProcedure dcProcedure = new RouterDistCpProcedure();
executeProcedure(dcProcedure, Stage.FINAL_DISTCP,
() -> dcProcedure.disableWrite(context));
intercept(RemoteException.class, "is in a read only mount point",
"Expect readonly exception.", () -> routerClient
.mkdirs(mount + "/dir", new FsPermission(020), false));
}
@AfterAll
public static void tearDown() {
cluster.stopRouter(routerContext);
}
}
| TestRouterDistCpProcedure |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/service/registry/HttpServiceGroup.java | {
"start": 1505,
"end": 1978
} | enum ____ {
/**
* A group backed by {@link org.springframework.web.client.RestClient}.
*/
REST_CLIENT,
/**
* A group backed by {@link org.springframework.web.reactive.function.client.WebClient}.
*/
WEB_CLIENT,
/**
* Not specified, falling back on a default.
* @see ImportHttpServices#clientType()
* @see AbstractHttpServiceRegistrar#setDefaultClientType
*/
UNSPECIFIED;
/**
* Shortcut to check if this is the UNSPECIFIED | ClientType |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/join/LookupJoinHarnessTest.java | {
"start": 11954,
"end": 13209
} | class ____ implements FlatMapFunction<RowData, RowData> {
private static final long serialVersionUID = 4018474964018227081L;
private static final Map<Integer, List<GenericRowData>> data = new HashMap<>();
static {
data.put(1, Collections.singletonList(GenericRowData.of(1, fromString("Julian"))));
data.put(
3,
Arrays.asList(
GenericRowData.of(3, fromString("Jark")),
GenericRowData.of(3, fromString("Jackson"))));
data.put(4, Collections.singletonList(GenericRowData.of(4, fromString("Fabian"))));
}
@Override
public void flatMap(RowData value, Collector<RowData> out) throws Exception {
int id = value.getInt(0);
List<GenericRowData> rows = data.get(id);
if (rows != null) {
for (GenericRowData row : rows) {
out.collect(row);
}
}
}
}
/**
* The {@link TestingFetcherCollector} is a simple implementation of {@link
* TableFunctionCollector} which combines left and right into a JoinedRowData.
*/
public static final | TestingFetcherFunction |
java | spring-projects__spring-boot | module/spring-boot-liquibase/src/main/java/org/springframework/boot/liquibase/actuate/endpoint/LiquibaseEndpoint.java | {
"start": 5560,
"end": 7811
} | class ____ {
private final String author;
private final String changeLog;
private final String comments;
private final Set<String> contexts;
private final Instant dateExecuted;
private final String deploymentId;
private final String description;
private final ExecType execType;
private final String id;
private final Set<String> labels;
private final @Nullable String checksum;
private final Integer orderExecuted;
private final String tag;
public ChangeSetDescriptor(RanChangeSet ranChangeSet) {
this.author = ranChangeSet.getAuthor();
this.changeLog = ranChangeSet.getChangeLog();
this.comments = ranChangeSet.getComments();
this.contexts = ranChangeSet.getContextExpression().getContexts();
this.dateExecuted = Instant.ofEpochMilli(ranChangeSet.getDateExecuted().getTime());
this.deploymentId = ranChangeSet.getDeploymentId();
this.description = ranChangeSet.getDescription();
this.execType = ranChangeSet.getExecType();
this.id = ranChangeSet.getId();
this.labels = ranChangeSet.getLabels().getLabels();
this.checksum = ((ranChangeSet.getLastCheckSum() != null) ? ranChangeSet.getLastCheckSum().toString()
: null);
this.orderExecuted = ranChangeSet.getOrderExecuted();
this.tag = ranChangeSet.getTag();
}
public String getAuthor() {
return this.author;
}
public String getChangeLog() {
return this.changeLog;
}
public String getComments() {
return this.comments;
}
public Set<String> getContexts() {
return this.contexts;
}
public Instant getDateExecuted() {
return this.dateExecuted;
}
public String getDeploymentId() {
return this.deploymentId;
}
public String getDescription() {
return this.description;
}
public ExecType getExecType() {
return this.execType;
}
public String getId() {
return this.id;
}
public Set<String> getLabels() {
return this.labels;
}
public @Nullable String getChecksum() {
return this.checksum;
}
public Integer getOrderExecuted() {
return this.orderExecuted;
}
public String getTag() {
return this.tag;
}
}
/**
* Description of a context expression in a {@link ChangeSetDescriptor}.
*/
public static | ChangeSetDescriptor |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/StaticQualifiedUsingExpressionTest.java | {
"start": 13311,
"end": 13554
} | class ____ {
static Object x;
void f() {
Object x = C.x;
}
void g() {
Object y = x;
}
}
""")
.doTest();
}
}
| C |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/EnumCreatorTest.java | {
"start": 4253,
"end": 4581
} | class ____ extends SimpleModule
{
private static final long serialVersionUID = 1L;
@Override
public void setupModule(final SetupContext context) {
context.addDeserializers(new DelegatingDeserializers());
}
}
// [databind#929]: support multi-arg | DelegatingDeserializersModule |
java | quarkusio__quarkus | extensions/kubernetes/vanilla/deployment/src/test/java/io/quarkus/kubernetes/deployment/KubernetesConfigFallbackTest.java | {
"start": 586,
"end": 3121
} | class ____ {
@Test
void fallback() {
SmallRyeConfig config = new SmallRyeConfigBuilder()
.withSources(
inClassPath("application-kubernetes.properties", 250, Thread.currentThread().getContextClassLoader()))
.addDiscoveredCustomizers()
.withConverter(Duration.class, 100, new DurationConverter())
.withMappingIgnore("quarkus.**")
.withMapping(KubernetesConfig.class)
.withMapping(OpenShiftConfig.class)
.withMapping(KnativeConfig.class)
.build();
KubernetesConfig kubernetes = config.getConfigMapping(KubernetesConfig.class);
OpenShiftConfig openShift = config.getConfigMapping(OpenShiftConfig.class);
KnativeConfig knative = config.getConfigMapping(KnativeConfig.class);
assertTrue(kubernetes.name().isPresent());
assertTrue(openShift.name().isPresent());
assertEquals("naruto", kubernetes.name().get());
assertEquals("sasuke", openShift.name().get());
assertEquals(knative.name(), kubernetes.name());
assertEquals(kubernetes.partOf(), openShift.partOf());
for (Map.Entry<String, String> entry : kubernetes.labels().entrySet()) {
assertTrue(openShift.labels().containsKey(entry.getKey()));
assertEquals(openShift.labels().get(entry.getKey()), entry.getValue());
assertTrue(knative.labels().containsKey(entry.getKey()));
assertEquals(knative.labels().get(entry.getKey()), entry.getValue());
}
}
@Test
void sharedOnlyBetweenKubernetesAndOpenshift() {
SmallRyeConfig config = new SmallRyeConfigBuilder()
.addDiscoveredCustomizers()
.withConverter(Duration.class, 100, new DurationConverter())
.withMappingIgnore("quarkus.**")
.withMapping(KubernetesConfig.class)
.withMapping(OpenShiftConfig.class)
.withMapping(KnativeConfig.class)
.withSources(new PropertiesConfigSource(Map.of("quarkus.kubernetes.init-task-defaults.enabled", "false"), ""))
.build();
KubernetesConfig kubernetes = config.getConfigMapping(KubernetesConfig.class);
OpenShiftConfig openShift = config.getConfigMapping(OpenShiftConfig.class);
assertFalse(kubernetes.initTaskDefaults().enabled());
assertFalse(openShift.initTaskDefaults().enabled());
}
}
| KubernetesConfigFallbackTest |
java | elastic__elasticsearch | modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorFactoryTests.java | {
"start": 860,
"end": 4393
} | class ____ extends ESTestCase {
private RemoveProcessor.Factory factory;
@Before
public void init() {
factory = new RemoveProcessor.Factory(TestTemplateService.instance());
}
public void testCreate() throws Exception {
Map<String, Object> config = new HashMap<>();
config.put("field", "field1");
String processorTag = randomAlphaOfLength(10);
RemoveProcessor removeProcessor = factory.create(null, processorTag, null, config, null);
assertThat(removeProcessor.getTag(), equalTo(processorTag));
assertThat(removeProcessor.getFieldsToRemove().get(0).newInstance(Map.of()).execute(), equalTo("field1"));
}
public void testCreateKeepField() throws Exception {
Map<String, Object> config = new HashMap<>();
config.put("keep", List.of("field1", "field2"));
String processorTag = randomAlphaOfLength(10);
RemoveProcessor removeProcessor = factory.create(null, processorTag, null, config, null);
assertThat(removeProcessor.getTag(), equalTo(processorTag));
assertThat(removeProcessor.getFieldsToKeep().get(0).newInstance(Map.of()).execute(), equalTo("field1"));
assertThat(removeProcessor.getFieldsToKeep().get(1).newInstance(Map.of()).execute(), equalTo("field2"));
}
public void testCreateMultipleFields() throws Exception {
Map<String, Object> config = new HashMap<>();
config.put("field", List.of("field1", "field2"));
String processorTag = randomAlphaOfLength(10);
RemoveProcessor removeProcessor = factory.create(null, processorTag, null, config, null);
assertThat(removeProcessor.getTag(), equalTo(processorTag));
assertThat(
removeProcessor.getFieldsToRemove().stream().map(template -> template.newInstance(Map.of()).execute()).toList(),
equalTo(List.of("field1", "field2"))
);
}
public void testCreateMissingField() throws Exception {
Map<String, Object> config = new HashMap<>();
try {
factory.create(null, null, null, config, null);
fail("factory create should have failed");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), equalTo("[keep] or [field] must be specified"));
}
}
public void testCreateTooManyFields() throws Exception {
Map<String, Object> config = new HashMap<>();
config.put("field", "field1");
config.put("keep", "field2");
try {
factory.create(null, null, null, config, null);
fail("factory create should have failed");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), equalTo("[keep] and [field] cannot both be used in the same processor"));
}
}
public void testInvalidMustacheTemplate() throws Exception {
factory = new RemoveProcessor.Factory(TestTemplateService.instance(true));
Map<String, Object> config = new HashMap<>();
config.put("field", "{{field1}}");
String processorTag = randomAlphaOfLength(10);
ElasticsearchException exception = expectThrows(
ElasticsearchException.class,
() -> factory.create(null, processorTag, null, config, null)
);
assertThat(exception.getMessage(), equalTo("java.lang.RuntimeException: could not compile script"));
assertThat(exception.getMetadata("es.processor_tag").get(0), equalTo(processorTag));
}
}
| RemoveProcessorFactoryTests |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/observable/ObservableRefCountTest.java | {
"start": 37550,
"end": 41838
} | class ____ extends ConnectableObservable<Object>
implements Disposable {
@Override
public void connect(Consumer<? super Disposable> connection) {
try {
connection.accept(Disposable.empty());
} catch (Throwable ex) {
throw ExceptionHelper.wrapOrThrow(ex);
}
}
@Override
public void reset() {
// nothing to do in this test
}
@Override
protected void subscribeActual(Observer<? super Object> observer) {
observer.onSubscribe(Disposable.empty());
observer.onSubscribe(Disposable.empty());
observer.onComplete();
observer.onComplete();
observer.onError(new TestException());
}
@Override
public void dispose() {
}
@Override
public boolean isDisposed() {
return false;
}
}
@Test
public void doubleOnX() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new BadObservableDoubleOnX()
.refCount()
.test()
.assertResult();
TestHelper.assertError(errors, 0, ProtocolViolationException.class);
TestHelper.assertUndeliverable(errors, 1, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void doubleOnXCount() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new BadObservableDoubleOnX()
.refCount(1)
.test()
.assertResult();
TestHelper.assertError(errors, 0, ProtocolViolationException.class);
TestHelper.assertUndeliverable(errors, 1, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void doubleOnXTime() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new BadObservableDoubleOnX()
.refCount(5, TimeUnit.SECONDS, Schedulers.single())
.test()
.assertResult();
TestHelper.assertError(errors, 0, ProtocolViolationException.class);
TestHelper.assertUndeliverable(errors, 1, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void cancelTerminateStateExclusion() {
ObservableRefCount<Object> o = (ObservableRefCount<Object>)PublishSubject.create()
.publish()
.refCount();
o.cancel(new RefConnection(o));
RefConnection rc = new RefConnection(o);
o.connection = null;
rc.subscriberCount = 0;
o.timeout(rc);
rc.subscriberCount = 1;
o.timeout(rc);
o.connection = rc;
o.timeout(rc);
rc.subscriberCount = 0;
o.timeout(rc);
// -------------------
rc.subscriberCount = 2;
rc.connected = false;
o.connection = rc;
o.cancel(rc);
rc.subscriberCount = 1;
rc.connected = false;
o.connection = rc;
o.cancel(rc);
rc.subscriberCount = 2;
rc.connected = true;
o.connection = rc;
o.cancel(rc);
rc.subscriberCount = 1;
rc.connected = true;
o.connection = rc;
rc.lazySet(null);
o.cancel(rc);
o.connection = rc;
o.cancel(new RefConnection(o));
}
@Test
public void replayRefCountShallBeThreadSafe() {
for (int i = 0; i < TestHelper.RACE_LONG_LOOPS; i++) {
Observable<Integer> observable = Observable.just(1).replay(1).refCount();
TestObserver<Integer> observer1 = observable
.subscribeOn(Schedulers.io())
.test();
TestObserver<Integer> observer2 = observable
.subscribeOn(Schedulers.io())
.test();
observer1
.withTag("" + i)
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1);
observer2
.withTag("" + i)
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1);
}
}
static final | BadObservableDoubleOnX |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/ClassUtils.java | {
"start": 9421,
"end": 10001
} | class ____ get the abbreviated name for, may be {@code null}.
* @param lengthHint the desired length of the abbreviated name.
* @return the abbreviated name or an empty string.
* @throws IllegalArgumentException if len <= 0.
* @see #getAbbreviatedName(String, int)
* @since 3.4
*/
public static String getAbbreviatedName(final Class<?> cls, final int lengthHint) {
if (cls == null) {
return StringUtils.EMPTY;
}
return getAbbreviatedName(cls.getName(), lengthHint);
}
/**
* Gets the abbreviated | to |
java | netty__netty | handler/src/main/java/io/netty/handler/ssl/ApplicationProtocolNegotiationHandler.java | {
"start": 2645,
"end": 8380
} | class ____ extends ChannelInboundHandlerAdapter {
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(ApplicationProtocolNegotiationHandler.class);
private final String fallbackProtocol;
private final RecyclableArrayList bufferedMessages = RecyclableArrayList.newInstance();
private ChannelHandlerContext ctx;
private boolean sslHandlerChecked;
/**
* Creates a new instance with the specified fallback protocol name.
*
* @param fallbackProtocol the name of the protocol to use when
* ALPN/NPN negotiation fails or the client does not support ALPN/NPN
*/
protected ApplicationProtocolNegotiationHandler(String fallbackProtocol) {
this.fallbackProtocol = ObjectUtil.checkNotNull(fallbackProtocol, "fallbackProtocol");
}
@Override
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
this.ctx = ctx;
super.handlerAdded(ctx);
}
@Override
public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
fireBufferedMessages();
bufferedMessages.recycle();
super.handlerRemoved(ctx);
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
// Let's buffer all data until this handler will be removed from the pipeline.
bufferedMessages.add(msg);
if (!sslHandlerChecked) {
sslHandlerChecked = true;
if (ctx.pipeline().get(SslHandler.class) == null) {
// Just remove ourself if there is no SslHandler in the pipeline and so we would otherwise
// buffer forever.
removeSelfIfPresent(ctx);
}
}
}
/**
* Process all backlog into pipeline from List.
*/
private void fireBufferedMessages() {
if (!bufferedMessages.isEmpty()) {
for (int i = 0; i < bufferedMessages.size(); i++) {
ctx.fireChannelRead(bufferedMessages.get(i));
}
ctx.fireChannelReadComplete();
bufferedMessages.clear();
}
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt instanceof SslHandshakeCompletionEvent) {
SslHandshakeCompletionEvent handshakeEvent = (SslHandshakeCompletionEvent) evt;
try {
if (handshakeEvent.isSuccess()) {
SslHandler sslHandler = ctx.pipeline().get(SslHandler.class);
if (sslHandler == null) {
throw new IllegalStateException("cannot find an SslHandler in the pipeline (required for "
+ "application-level protocol negotiation)");
}
String protocol = sslHandler.applicationProtocol();
configurePipeline(ctx, protocol != null ? protocol : fallbackProtocol);
} else {
// if the event is not produced because of an successful handshake we will receive the same
// exception in exceptionCaught(...) and handle it there. This will allow us more fine-grained
// control over which exception we propagate down the ChannelPipeline.
//
// See https://github.com/netty/netty/issues/10342
}
} catch (Throwable cause) {
exceptionCaught(ctx, cause);
} finally {
// Handshake failures are handled in exceptionCaught(...).
if (handshakeEvent.isSuccess()) {
removeSelfIfPresent(ctx);
}
}
}
if (evt instanceof ChannelInputShutdownEvent) {
fireBufferedMessages();
}
ctx.fireUserEventTriggered(evt);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
fireBufferedMessages();
super.channelInactive(ctx);
}
private void removeSelfIfPresent(ChannelHandlerContext ctx) {
ChannelPipeline pipeline = ctx.pipeline();
if (!ctx.isRemoved()) {
pipeline.remove(this);
}
}
/**
* Invoked on successful initial SSL/TLS handshake. Implement this method to configure your pipeline
* for the negotiated application-level protocol.
*
* @param protocol the name of the negotiated application-level protocol, or
* the fallback protocol name specified in the constructor call if negotiation failed or the client
* isn't aware of ALPN/NPN extension
*/
protected abstract void configurePipeline(ChannelHandlerContext ctx, String protocol) throws Exception;
/**
* Invoked on failed initial SSL/TLS handshake.
*/
protected void handshakeFailure(ChannelHandlerContext ctx, Throwable cause) throws Exception {
logger.warn("{} TLS handshake failed:", ctx.channel(), cause);
ctx.close();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
Throwable wrapped;
if (cause instanceof DecoderException && ((wrapped = cause.getCause()) instanceof SSLException)) {
try {
handshakeFailure(ctx, wrapped);
return;
} finally {
removeSelfIfPresent(ctx);
}
}
logger.warn("{} Failed to select the application-level protocol:", ctx.channel(), cause);
ctx.fireExceptionCaught(cause);
ctx.close();
}
}
| ApplicationProtocolNegotiationHandler |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/DelegatingArrayCreatorsTest.java | {
"start": 1826,
"end": 2069
} | class ____ {
public String value;
public Value2324(String v) { value = v; }
@Override
public boolean equals(Object o) {
return value.equals(((Value2324) o).value);
}
}
static | Value2324 |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng3703ExecutionProjectWithRelativePathsTest.java | {
"start": 1180,
"end": 2638
} | class ____ extends AbstractMavenIntegrationTestCase {
@Test
public void testForkFromMojo() throws Exception {
File testDir = extractResources("/mng-3703");
File pluginDir = new File(testDir, "maven-mng3703-plugin");
File projectDir = new File(testDir, "project");
Verifier verifier;
verifier = newVerifier(pluginDir.getAbsolutePath());
verifier.addCliArgument("install");
verifier.execute();
verifier.verifyErrorFreeLog();
verifier = newVerifier(projectDir.getAbsolutePath());
verifier.setLogFileName("log-mojo.txt");
verifier.addCliArgument("package");
verifier.execute();
verifier.verifyErrorFreeLog();
}
@Test
public void testForkFromReport() throws Exception {
File testDir = extractResources("/mng-3703");
File pluginDir = new File(testDir, "maven-mng3703-plugin");
File projectDir = new File(testDir, "project");
Verifier verifier;
verifier = newVerifier(pluginDir.getAbsolutePath());
verifier.addCliArgument("install");
verifier.execute();
verifier.verifyErrorFreeLog();
verifier = newVerifier(projectDir.getAbsolutePath());
verifier.setLogFileName("log-report.txt");
verifier.addCliArgument("site");
verifier.execute();
verifier.verifyErrorFreeLog();
}
}
| MavenITmng3703ExecutionProjectWithRelativePathsTest |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/aot/DefaultBeanRegistrationCodeFragmentsTests.java | {
"start": 2845,
"end": 14899
} | class ____ {
private final BeanRegistrationsCode beanRegistrationsCode = new MockBeanRegistrationsCode(new TestGenerationContext());
private final GenerationContext generationContext = new TestGenerationContext();
private final DefaultListableBeanFactory beanFactory = new DefaultListableBeanFactory();
@Test
public void getTargetWithInstanceSupplier() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(SimpleBean.class);
beanDefinition.setInstanceSupplier(SimpleBean::new);
RegisteredBean registeredBean = registerTestBean(beanDefinition);
BeanRegistrationCodeFragments codeFragments = createInstance(registeredBean);
assertThatExceptionOfType(AotBeanProcessingException.class)
.isThrownBy(() -> codeFragments.getTarget(registeredBean))
.withMessageContaining("Error processing bean with name 'testBean': instance supplier is not supported");
}
@Test
public void getTargetWithInstanceSupplierAndResourceDescription() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(SimpleBean.class);
beanDefinition.setInstanceSupplier(SimpleBean::new);
beanDefinition.setResourceDescription("my test resource");
RegisteredBean registeredBean = registerTestBean(beanDefinition);
BeanRegistrationCodeFragments codeFragments = createInstance(registeredBean);
assertThatExceptionOfType(AotBeanProcessingException.class)
.isThrownBy(() -> codeFragments.getTarget(registeredBean))
.withMessageContaining("Error processing bean with name 'testBean' defined in my test resource: " +
"instance supplier is not supported");
}
@Test
void getTargetOnConstructor() {
RegisteredBean registeredBean = registerTestBean(SimpleBean.class,
SimpleBean.class.getDeclaredConstructors()[0]);
assertTarget(createInstance(registeredBean).getTarget(registeredBean), SimpleBean.class);
}
@Test
void getTargetOnConstructorToPublicFactoryBean() {
RegisteredBean registeredBean = registerTestBean(SimpleBean.class,
SimpleBeanFactoryBean.class.getDeclaredConstructors()[0]);
assertTarget(createInstance(registeredBean).getTarget(registeredBean), SimpleBean.class);
}
@Test
void getTargetOnConstructorToPublicFactoryBeanProducingArray() {
RegisteredBean registeredBean = registerTestBean(SimpleBean[].class,
SimpleBeanArrayFactoryBean.class.getDeclaredConstructors()[0]);
assertTarget(createInstance(registeredBean).getTarget(registeredBean), SimpleBean.class);
}
@Test
void getTargetOnConstructorToPublicGenericFactoryBeanExtractTargetFromFactoryBeanType() {
ResolvableType beanType = ResolvableType.forClassWithGenerics(
GenericFactoryBean.class, SimpleBean.class);
RegisteredBean registeredBean = registerTestBean(beanType,
GenericFactoryBean.class.getDeclaredConstructors()[0]);
assertTarget(createInstance(registeredBean).getTarget(registeredBean), SimpleBean.class);
}
@Test
void getTargetOnConstructorToPublicGenericFactoryBeanWithBoundExtractTargetFromFactoryBeanType() {
ResolvableType beanType = ResolvableType.forClassWithGenerics(
NumberFactoryBean.class, Integer.class);
RegisteredBean registeredBean = registerTestBean(beanType,
NumberFactoryBean.class.getDeclaredConstructors()[0]);
assertTarget(createInstance(registeredBean).getTarget(registeredBean), Integer.class);
}
@Test
void getTargetOnConstructorToPublicGenericFactoryBeanUseBeanTypeAsFallback() {
RegisteredBean registeredBean = registerTestBean(SimpleBean.class,
GenericFactoryBean.class.getDeclaredConstructors()[0]);
assertTarget(createInstance(registeredBean).getTarget(registeredBean), SimpleBean.class);
}
@Test
void getTargetOnConstructorToProtectedFactoryBean() {
RegisteredBean registeredBean = registerTestBean(SimpleBean.class,
PrivilegedTestBeanFactoryBean.class.getDeclaredConstructors()[0]);
assertTarget(createInstance(registeredBean).getTarget(registeredBean),
PrivilegedTestBeanFactoryBean.class);
}
@Test
void getTargetOnMethod() {
Method method = ReflectionUtils.findMethod(SimpleBeanConfiguration.class, "simpleBean");
assertThat(method).isNotNull();
RegisteredBean registeredBean = registerTestBean(SimpleBean.class, method);
assertTarget(createInstance(registeredBean).getTarget(registeredBean),
SimpleBeanConfiguration.class);
}
@Test // gh-32609
void getTargetOnMethodFromInterface() {
this.beanFactory.registerBeanDefinition("configuration",
new RootBeanDefinition(DefaultSimpleBeanContract.class));
Method method = ReflectionUtils.findMethod(SimpleBeanContract.class, "simpleBean");
assertThat(method).isNotNull();
RootBeanDefinition beanDefinition = new RootBeanDefinition(SimpleBean.class);
applyConstructorOrFactoryMethod(beanDefinition, method);
beanDefinition.setFactoryBeanName("configuration");
this.beanFactory.registerBeanDefinition("testBean", beanDefinition);
RegisteredBean registeredBean = RegisteredBean.of(this.beanFactory, "testBean");
assertTarget(createInstance(registeredBean).getTarget(registeredBean),
DefaultSimpleBeanContract.class);
}
@Test
void getTargetOnMethodWithInnerBeanInJavaPackage() {
RegisteredBean registeredBean = registerTestBean(SimpleBean.class);
Method method = ReflectionUtils.findMethod(getClass(), "createString");
assertThat(method).isNotNull();
RegisteredBean innerBean = RegisteredBean.ofInnerBean(registeredBean, "innerTestBean",
applyConstructorOrFactoryMethod(new RootBeanDefinition(String.class), method));
assertTarget(createInstance(innerBean).getTarget(innerBean), getClass());
}
@Test
void getTargetOnConstructorWithInnerBeanInJavaPackage() {
RegisteredBean registeredBean = registerTestBean(SimpleBean.class);
RootBeanDefinition innerBeanDefinition = applyConstructorOrFactoryMethod(
new RootBeanDefinition(String.class), String.class.getDeclaredConstructors()[0]);
RegisteredBean innerBean = RegisteredBean.ofInnerBean(registeredBean, "innerTestBean",
innerBeanDefinition);
assertTarget(createInstance(innerBean).getTarget(innerBean), SimpleBean.class);
}
@Test
void getTargetOnConstructorWithInnerBeanOnTypeInJavaPackage() {
RegisteredBean registeredBean = registerTestBean(SimpleBean.class);
RootBeanDefinition innerBeanDefinition = applyConstructorOrFactoryMethod(
new RootBeanDefinition(StringFactoryBean.class),
StringFactoryBean.class.getDeclaredConstructors()[0]);
RegisteredBean innerBean = RegisteredBean.ofInnerBean(registeredBean, "innerTestBean",
innerBeanDefinition);
assertTarget(createInstance(innerBean).getTarget(innerBean), SimpleBean.class);
}
@Test
void getTargetOnMethodWithInnerBeanInRegularPackage() {
RegisteredBean registeredBean = registerTestBean(DummyFactory.class);
Method method = ReflectionUtils.findMethod(SimpleBeanConfiguration.class, "simpleBean");
assertThat(method).isNotNull();
RegisteredBean innerBean = RegisteredBean.ofInnerBean(registeredBean, "innerTestBean",
applyConstructorOrFactoryMethod(new RootBeanDefinition(SimpleBean.class), method));
assertTarget(createInstance(innerBean).getTarget(innerBean),
SimpleBeanConfiguration.class);
}
@Test
void getTargetOnConstructorWithInnerBeanInRegularPackage() {
RegisteredBean registeredBean = registerTestBean(DummyFactory.class);
RootBeanDefinition innerBeanDefinition = applyConstructorOrFactoryMethod(
new RootBeanDefinition(SimpleBean.class), SimpleBean.class.getDeclaredConstructors()[0]);
RegisteredBean innerBean = RegisteredBean.ofInnerBean(registeredBean, "innerTestBean",
innerBeanDefinition);
assertTarget(createInstance(innerBean).getTarget(innerBean), SimpleBean.class);
}
@Test
void getTargetOnConstructorWithInnerBeanOnFactoryBeanOnTypeInRegularPackage() {
RegisteredBean registeredBean = registerTestBean(DummyFactory.class);
RootBeanDefinition innerBeanDefinition = applyConstructorOrFactoryMethod(
new RootBeanDefinition(SimpleBean.class),
SimpleBeanFactoryBean.class.getDeclaredConstructors()[0]);
RegisteredBean innerBean = RegisteredBean.ofInnerBean(registeredBean, "innerTestBean",
innerBeanDefinition);
assertTarget(createInstance(innerBean).getTarget(innerBean), SimpleBean.class);
}
@Test
void customizedGetTargetDoesNotResolveInstantiationDescriptor() {
RegisteredBean registeredBean = spy(registerTestBean(SimpleBean.class));
BeanRegistrationCodeFragments customCodeFragments = createCustomCodeFragments(registeredBean, codeFragments -> new BeanRegistrationCodeFragmentsDecorator(codeFragments) {
@Override
public ClassName getTarget(RegisteredBean registeredBean) {
return ClassName.get(String.class);
}
});
assertTarget(customCodeFragments.getTarget(registeredBean), String.class);
verify(registeredBean, never()).resolveInstantiationDescriptor();
}
@Test
void customizedGenerateInstanceSupplierCodeDoesNotResolveInstantiationDescriptor() {
RegisteredBean registeredBean = spy(registerTestBean(SimpleBean.class));
BeanRegistrationCodeFragments customCodeFragments = createCustomCodeFragments(registeredBean, codeFragments -> new BeanRegistrationCodeFragmentsDecorator(codeFragments) {
@Override
public CodeBlock generateInstanceSupplierCode(GenerationContext generationContext,
BeanRegistrationCode beanRegistrationCode, boolean allowDirectSupplierShortcut) {
return CodeBlock.of("// Hello");
}
});
assertThat(customCodeFragments.generateInstanceSupplierCode(this.generationContext,
new MockBeanRegistrationCode(this.generationContext), false)).hasToString("// Hello");
verify(registeredBean, never()).resolveInstantiationDescriptor();
}
private BeanRegistrationCodeFragments createCustomCodeFragments(RegisteredBean registeredBean, UnaryOperator<BeanRegistrationCodeFragments> customFragments) {
BeanRegistrationAotContribution aotContribution = BeanRegistrationAotContribution.
withCustomCodeFragments(customFragments);
BeanRegistrationCodeFragments defaultCodeFragments = createInstance(registeredBean);
return aotContribution.customizeBeanRegistrationCodeFragments(
this.generationContext, defaultCodeFragments);
}
private void assertTarget(ClassName target, Class<?> expected) {
assertThat(target).isEqualTo(ClassName.get(expected));
}
private RegisteredBean registerTestBean(Class<?> beanType) {
return registerTestBean(beanType, null);
}
private RegisteredBean registerTestBean(Class<?> beanType,
@Nullable Executable constructorOrFactoryMethod) {
this.beanFactory.registerBeanDefinition("testBean", applyConstructorOrFactoryMethod(
new RootBeanDefinition(beanType), constructorOrFactoryMethod));
return RegisteredBean.of(this.beanFactory, "testBean");
}
private RegisteredBean registerTestBean(ResolvableType beanType,
@Nullable Executable constructorOrFactoryMethod) {
RootBeanDefinition beanDefinition = new RootBeanDefinition();
beanDefinition.setTargetType(beanType);
return registerTestBean(applyConstructorOrFactoryMethod(
beanDefinition, constructorOrFactoryMethod));
}
private RegisteredBean registerTestBean(RootBeanDefinition beanDefinition) {
this.beanFactory.registerBeanDefinition("testBean", beanDefinition);
return RegisteredBean.of(this.beanFactory, "testBean");
}
private RootBeanDefinition applyConstructorOrFactoryMethod(RootBeanDefinition beanDefinition,
@Nullable Executable constructorOrFactoryMethod) {
if (constructorOrFactoryMethod instanceof Method method) {
beanDefinition.setResolvedFactoryMethod(method);
}
else if (constructorOrFactoryMethod instanceof Constructor<?> constructor) {
beanDefinition.setAttribute(RootBeanDefinition.PREFERRED_CONSTRUCTORS_ATTRIBUTE, constructor);
}
return beanDefinition;
}
private BeanRegistrationCodeFragments createInstance(RegisteredBean registeredBean) {
return new DefaultBeanRegistrationCodeFragments(this.beanRegistrationsCode, registeredBean,
new BeanDefinitionMethodGeneratorFactory(this.beanFactory));
}
@SuppressWarnings("unused")
static String createString() {
return "Test";
}
static | DefaultBeanRegistrationCodeFragmentsTests |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openshiftai/completion/OpenShiftAiChatCompletionServiceSettings.java | {
"start": 988,
"end": 1107
} | class ____ the model ID, URI, and rate limit settings for the OpenShift AI chat completion service.
*/
public | encapsulates |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/NonFinalStaticFieldTest.java | {
"start": 1558,
"end": 1850
} | class ____ {
private static final String FOO = "";
}
""")
.doTest();
}
@Test
public void positiveButNotFixable_noRefactoring() {
refactoringTestHelper
.addInputLines(
"Test.java",
"""
public | Test |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java | {
"start": 4439,
"end": 139864
} | class ____ {
private static final String A_PATH = CapacitySchedulerConfiguration.ROOT + ".a";
private static final String B_PATH = CapacitySchedulerConfiguration.ROOT + ".b";
private static final String C_PATH = CapacitySchedulerConfiguration.ROOT + ".c";
private static final String D_PATH = CapacitySchedulerConfiguration.ROOT + ".d";
private static final String A1_PATH = A_PATH + ".a1";
private static final String A2_PATH = A_PATH + ".a2";
private static final String B1_PATH = B_PATH + ".b1";
private static final String B2_PATH = B_PATH + ".b2";
private static final String C1_PATH = C_PATH + ".c1";
private static final String C2_PATH = C_PATH + ".c2";
private static final QueuePath ROOT = new QueuePath(CapacitySchedulerConfiguration.ROOT);
private static final QueuePath A = new QueuePath(A_PATH);
private static final QueuePath B = new QueuePath(B_PATH);
private static final QueuePath C = new QueuePath(C_PATH);
private static final QueuePath D = new QueuePath(D_PATH);
private static final QueuePath A1 = new QueuePath(A1_PATH);
private static final QueuePath A2 = new QueuePath(A2_PATH);
private static final QueuePath B1 = new QueuePath(B1_PATH);
private static final QueuePath B2 = new QueuePath(B2_PATH);
private static final QueuePath C1 = new QueuePath(C1_PATH);
private static final QueuePath C2 = new QueuePath(C2_PATH);
private final int GB = 1024;
private YarnConfiguration conf;
RMNodeLabelsManager mgr;
@BeforeEach
public void setUp() throws Exception {
conf = new YarnConfiguration();
conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
ResourceScheduler.class);
mgr = new NullRMNodeLabelsManager();
mgr.init(conf);
}
private Configuration getConfigurationWithQueueLabels(Configuration config) {
CapacitySchedulerConfiguration conf =
new CapacitySchedulerConfiguration(config);
// Define top-level queues
conf.setQueues(ROOT, new String[] {"a", "b", "c"});
conf.setCapacityByLabel(ROOT, "x", 100);
conf.setCapacityByLabel(ROOT, "y", 100);
conf.setCapacityByLabel(ROOT, "z", 100);
conf.setCapacity(A, 10);
conf.setMaximumCapacity(A, 15);
conf.setAccessibleNodeLabels(A, toSet("x"));
conf.setCapacityByLabel(A, "x", 100);
conf.setCapacity(B, 20);
conf.setAccessibleNodeLabels(B, toSet("y", "z"));
conf.setCapacityByLabel(B, "y", 100);
conf.setCapacityByLabel(B, "z", 100);
conf.setCapacity(C, 70);
conf.setMaximumCapacity(C, 70);
conf.setAccessibleNodeLabels(C, RMNodeLabelsManager.EMPTY_STRING_SET);
// Define 2nd-level queues
conf.setQueues(A, new String[] {"a1"});
conf.setCapacity(A1, 100);
conf.setMaximumCapacity(A1, 100);
conf.setCapacityByLabel(A1, "x", 100);
conf.setQueues(B, new String[] {"b1"});
conf.setCapacity(B1, 100);
conf.setMaximumCapacity(B1, 100);
conf.setCapacityByLabel(B1, "y", 100);
conf.setCapacityByLabel(B1, "z", 100);
conf.setQueues(C, new String[] {"c1"});
conf.setCapacity(C1, 100);
conf.setMaximumCapacity(C1, 100);
return conf;
}
private void checkTaskContainersHost(ApplicationAttemptId attemptId,
ContainerId containerId, ResourceManager rm, String host) {
YarnScheduler scheduler = rm.getRMContext().getScheduler();
SchedulerAppReport appReport = scheduler.getSchedulerAppInfo(attemptId);
assertTrue(appReport.getLiveContainers().size() > 0);
for (RMContainer c : appReport.getLiveContainers()) {
if (c.getContainerId().equals(containerId)) {
assertEquals(host, c.getAllocatedNode().getHost());
}
}
}
@SuppressWarnings("unchecked")
private <E> Set<E> toSet(E... elements) {
Set<E> set = Sets.newHashSet(elements);
return set;
}
@Test
@Timeout(value = 300)
public void testContainerAllocationWithSingleUserLimits() throws Exception {
final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
mgr.init(conf);
// set node -> label
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"),
NodeId.newInstance("h2", 0), toSet("y")));
// inject node label manager
MockRM rm1 = new MockRM(TestUtils.getConfigurationWithDefaultQueueLabels(conf)) {
@Override
public RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x
rm1.registerNode("h2:1234", 8000); // label = y
MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty>
// launch an app to queue a1 (label = x), and check all container will
// be allocated in h1
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(200, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withUnmanagedAM(false)
.build();
RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
// A has only 10% of x, so it can only allocate one container in label=empty
ContainerId containerId =
ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "");
assertTrue(rm1.waitForState(nm3, containerId,
RMContainerState.ALLOCATED));
// Cannot allocate 2nd label=empty container
containerId =
ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "");
assertFalse(rm1.waitForState(nm3, containerId,
RMContainerState.ALLOCATED));
// A has default user limit = 100, so it can use all resource in label = x
// We can allocate floor(8000 / 1024) = 7 containers
for (int id = 3; id <= 8; id++) {
containerId =
ContainerId.newContainerId(am1.getApplicationAttemptId(), id);
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x");
assertTrue(rm1.waitForState(nm1, containerId,
RMContainerState.ALLOCATED));
}
rm1.close();
}
@Test
@Timeout(value = 300)
public void testContainerAllocateWithComplexLabels() throws Exception {
/*
* Queue structure:
* root (*)
* ________________
* / \
* a x(100%), y(50%) b y(50%), z(100%)
* ________________ ______________
* / / \
* a1 (x,y) b1(no) b2(y,z)
* 100% y = 100%, z = 100%
*
* Node structure:
* h1 : x
* h2 : y
* h3 : y
* h4 : z
* h5 : NO
*
* Total resource:
* x: 4G
* y: 6G
* z: 2G
* *: 2G
*
* Resource of
* a1: x=4G, y=3G, NO=0.2G
* b1: NO=0.9G (max=1G)
* b2: y=3, z=2G, NO=0.9G (max=1G)
*
* Each node can only allocate two containers
*/
// set node -> label
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y", "z"));
mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0),
toSet("x"), NodeId.newInstance("h2", 0), toSet("y"),
NodeId.newInstance("h3", 0), toSet("y"), NodeId.newInstance("h4", 0),
toSet("z"), NodeId.newInstance("h5", 0),
RMNodeLabelsManager.EMPTY_STRING_SET));
// inject node label manager
MockRM rm1 = new MockRM(TestUtils.getComplexConfigurationWithQueueLabels(conf)) {
@Override
public RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 2048);
MockNM nm2 = rm1.registerNode("h2:1234", 2048);
MockNM nm3 = rm1.registerNode("h3:1234", 2048);
MockNM nm4 = rm1.registerNode("h4:1234", 2048);
MockNM nm5 = rm1.registerNode("h5:1234", 2048);
ContainerId containerId;
// launch an app to queue a1 (label = x), and check all container will
// be allocated in h1
MockRMAppSubmissionData data2 =
MockRMAppSubmissionData.Builder.createWithMemory(1024, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withUnmanagedAM(false)
.build();
RMApp app1 = MockRMAppSubmitter.submit(rm1, data2);
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
// request a container (label = y). can be allocated on nm2
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y");
containerId =
ContainerId.newContainerId(am1.getApplicationAttemptId(), 2L);
assertTrue(rm1.waitForState(nm2, containerId,
RMContainerState.ALLOCATED));
checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
"h2");
// launch an app to queue b1 (label = y), and check all container will
// be allocated in h5
MockRMAppSubmissionData data1 =
MockRMAppSubmissionData.Builder.createWithMemory(1024, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("b1")
.withUnmanagedAM(false)
.build();
RMApp app2 = MockRMAppSubmitter.submit(rm1, data1);
MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm5);
// request a container for AM, will succeed
// and now b1's queue capacity will be used, cannot allocate more containers
// (Maximum capacity reached)
am2.allocate("*", 1024, 1, new ArrayList<ContainerId>());
containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
assertFalse(rm1.waitForState(nm4, containerId,
RMContainerState.ALLOCATED));
assertFalse(rm1.waitForState(nm5, containerId,
RMContainerState.ALLOCATED));
// launch an app to queue b2
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(1024, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("b2")
.withUnmanagedAM(false)
.build();
RMApp app3 = MockRMAppSubmitter.submit(rm1, data);
MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm5);
// request a container. try to allocate on nm1 (label = x) and nm3 (label =
// y,z). Will successfully allocate on nm3
am3.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y");
containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2);
assertFalse(rm1.waitForState(nm1, containerId,
RMContainerState.ALLOCATED));
assertTrue(rm1.waitForState(nm3, containerId,
RMContainerState.ALLOCATED));
checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1,
"h3");
// try to allocate container (request label = z) on nm4 (label = y,z).
// Will successfully allocate on nm4 only.
am3.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "z");
containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 3L);
assertTrue(rm1.waitForState(nm4, containerId,
RMContainerState.ALLOCATED));
checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1,
"h4");
rm1.close();
}
@Test
@Timeout(value = 120)
public void testContainerAllocateWithLabels() throws Exception {
// set node -> label
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"),
NodeId.newInstance("h2", 0), toSet("y")));
// inject node label manager
MockRM rm1 = new MockRM(getConfigurationWithQueueLabels(conf)) {
@Override
public RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x
MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y
MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty>
ContainerId containerId;
// launch an app to queue a1 (label = x), and check all container will
// be allocated in h1
MockRMAppSubmissionData data2 =
MockRMAppSubmissionData.Builder.createWithMemory(200, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withUnmanagedAM(false)
.build();
RMApp app1 = MockRMAppSubmitter.submit(rm1, data2);
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm3);
// request a container.
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x");
containerId =
ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
assertFalse(rm1.waitForState(nm2, containerId,
RMContainerState.ALLOCATED));
assertTrue(rm1.waitForState(nm1, containerId,
RMContainerState.ALLOCATED));
checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
"h1");
// launch an app to queue b1 (label = y), and check all container will
// be allocated in h2
MockRMAppSubmissionData data1 =
MockRMAppSubmissionData.Builder.createWithMemory(200, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("b1")
.withUnmanagedAM(false)
.build();
RMApp app2 = MockRMAppSubmitter.submit(rm1, data1);
MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm3);
// request a container.
am2.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y");
containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
assertFalse(rm1.waitForState(nm1, containerId,
RMContainerState.ALLOCATED));
assertTrue(rm1.waitForState(nm2, containerId,
RMContainerState.ALLOCATED));
checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1,
"h2");
// launch an app to queue c1 (label = ""), and check all container will
// be allocated in h3
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(200, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("c1")
.withUnmanagedAM(false)
.build();
RMApp app3 = MockRMAppSubmitter.submit(rm1, data);
MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3);
// request a container.
am3.allocate("*", 1024, 1, new ArrayList<ContainerId>());
containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2);
assertFalse(rm1.waitForState(nm2, containerId,
RMContainerState.ALLOCATED));
assertTrue(rm1.waitForState(nm3, containerId,
RMContainerState.ALLOCATED));
checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1,
"h3");
rm1.close();
}
@Test
@Timeout(value = 120)
public void testContainerAllocateWithDefaultQueueLabels() throws Exception {
// This test is pretty much similar to testContainerAllocateWithLabel.
// Difference is, this test doesn't specify label expression in ResourceRequest,
// instead, it uses default queue label expression
// set node -> label
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"),
NodeId.newInstance("h2", 0), toSet("y")));
// inject node label manager
MockRM rm1 = new MockRM(TestUtils.getConfigurationWithDefaultQueueLabels(conf)) {
@Override
public RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x
MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y
MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty>
ContainerId containerId;
// launch an app to queue a1 (label = x), and check all container will
// be allocated in h1
MockRMAppSubmissionData data2 =
MockRMAppSubmissionData.Builder.createWithMemory(200, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withUnmanagedAM(false)
.build();
RMApp app1 = MockRMAppSubmitter.submit(rm1, data2);
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
// request a container.
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>());
containerId =
ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
assertFalse(rm1.waitForState(nm3, containerId,
RMContainerState.ALLOCATED));
assertTrue(rm1.waitForState(nm1, containerId,
RMContainerState.ALLOCATED));
checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
"h1");
// launch an app to queue b1 (label = y), and check all container will
// be allocated in h2
MockRMAppSubmissionData data1 =
MockRMAppSubmissionData.Builder.createWithMemory(200, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("b1")
.withUnmanagedAM(false)
.build();
RMApp app2 = MockRMAppSubmitter.submit(rm1, data1);
MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);
// request a container.
am2.allocate("*", 1024, 1, new ArrayList<ContainerId>());
containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
assertFalse(rm1.waitForState(nm3, containerId,
RMContainerState.ALLOCATED));
assertTrue(rm1.waitForState(nm2, containerId,
RMContainerState.ALLOCATED));
checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1,
"h2");
// launch an app to queue c1 (label = ""), and check all container will
// be allocated in h3
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(200, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("c1")
.withUnmanagedAM(false)
.build();
RMApp app3 = MockRMAppSubmitter.submit(rm1, data);
MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3);
// request a container.
am3.allocate("*", 1024, 1, new ArrayList<ContainerId>());
containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2);
assertFalse(rm1.waitForState(nm2, containerId,
RMContainerState.ALLOCATED));
assertTrue(rm1.waitForState(nm3, containerId,
RMContainerState.ALLOCATED));
checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1,
"h3");
rm1.close();
}
@Test
@Timeout(value = 120)
public void testContainerReservationWithLabels() throws Exception {
// This test is pretty much similar to testContainerAllocateWithLabel.
// Difference is, this test doesn't specify label expression in
// ResourceRequest,
// instead, it uses default queue label expression
// set node -> label
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y",
"z"));
mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0),
toSet("x"), NodeId.newInstance("h2", 0), toSet("y"),
NodeId.newInstance("h3", 0), toSet("x")));
// inject node label manager
MockRM rm1 = new MockRM(
TestUtils.getConfigurationWithDefaultQueueLabels(conf)) {
@Override
public RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = x
rm1.registerNode("h2:1234", 8 * GB); // label = y
rm1.registerNode("h3:1234", 8 * GB); // label = x
ContainerId containerId;
// launch an app to queue a1 (label = x), and check all container will
// be allocated in h1
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withUnmanagedAM(false)
.build();
RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
// request a container.
am1.allocate("*", 4 * GB, 2, new ArrayList<ContainerId>());
containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1");
// Do node heartbeats 2 times
// First time will allocate container for app1, second time will reserve
// container for app1
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
"h1");
// Check if a 4G container allocated for app1, and 4G is reserved
FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1
.getApplicationAttemptId());
assertEquals(2, schedulerApp1.getLiveContainers().size());
assertTrue(schedulerApp1.getReservedContainers().size() > 0);
assertEquals(9 * GB, cs.getRootQueue().getQueueResourceUsage()
.getUsed("x").getMemorySize());
assertEquals(4 * GB, cs.getRootQueue().getQueueResourceUsage()
.getReserved("x").getMemorySize());
assertEquals(4 * GB,
leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize());
// Cancel asks of app2 and re-kick RM
am1.allocate("*", 4 * GB, 0, new ArrayList<ContainerId>());
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
assertEquals(5 * GB, cs.getRootQueue().getQueueResourceUsage()
.getUsed("x").getMemorySize());
assertEquals(0, cs.getRootQueue().getQueueResourceUsage()
.getReserved("x").getMemorySize());
assertEquals(0, leafQueue.getQueueResourceUsage().getReserved("x")
.getMemorySize());
rm1.close();
}
/**
 * Continue-looking with an explicit label expression ("x") on every request.
 * Two 8G nodes both labeled x. App1 (queue a1) first allocates two 5G mappers
 * (one per node), then asks for 3G reducers: the first reducer is reserved on
 * node1 (no headroom there), and a later heartbeat from node2 allocates it
 * there and unreserves node1. Used/reserved accounting for partition "x" is
 * verified at both the root queue and the leaf queue after every step.
 */
@Test
@Timeout(value = 120)
public void testContainerReservationContinueLookingWithLabels()
    throws Exception {
  // set node -> label
  mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x"));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0),
      toSet("x"), NodeId.newInstance("h2", 0), toSet("x")));
  // inject node label manager
  MockRM rm1 = new MockRM(
      TestUtils.getConfigurationWithQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = x
  MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB); // label = x
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
  LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1");
  ContainerId containerId;
  // launch an app to queue a1 (label = x)
  MockRMAppSubmissionData data1 =
      MockRMAppSubmissionData.Builder.createWithMemory(2 * GB, rm1)
          .withAppName("app1")
          .withUser("user")
          .withAcls(null)
          .withQueue("a1")
          .withUnmanagedAM(false)
          .withAmLabel("x")
          .build();
  RMApp app1 = MockRMAppSubmitter.submit(rm1, data1);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1
      .getApplicationAttemptId());
  // Verify AM (2G) live on node1
  containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
  checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
      "h1");
  assertEquals(1, schedulerApp1.getLiveContainers().size());
  assertFalse(schedulerApp1.getReservedContainers().size() > 0);
  assertEquals(2 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getUsed("x").getMemorySize());
  assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getReserved("x").getMemorySize());
  assertEquals(2 * GB,
      leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize());
  assertEquals(0 * GB,
      leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize());
  // request map containers for app1.
  am1.allocate("*", 5 * GB, 2, 5, new ArrayList<ContainerId>(), "x");
  // Do node heartbeat to allocate first mapper on node1
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  // Verify live on node1
  containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
  checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
      "h1");
  assertEquals(2, schedulerApp1.getLiveContainers().size());
  assertFalse(schedulerApp1.getReservedContainers().size() > 0);
  assertEquals(7 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getUsed("x").getMemorySize());
  assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getReserved("x").getMemorySize());
  assertEquals(7 * GB,
      leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize());
  assertEquals(0 * GB,
      leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize());
  // Do node heartbeat to allocate second mapper on node2
  cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  // Verify live on node2
  containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
  checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
      "h2");
  // node1 7 GB used, node2 5 GB used
  assertEquals(3, schedulerApp1.getLiveContainers().size());
  assertFalse(schedulerApp1.getReservedContainers().size() > 0);
  assertEquals(12 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getUsed("x").getMemorySize());
  assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getReserved("x").getMemorySize());
  assertEquals(12 * GB,
      leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize());
  assertEquals(0 * GB,
      leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize());
  // request reducer containers for app1.
  am1.allocate("*", 3 * GB, 2, 10, new ArrayList<ContainerId>(), "x");
  // Do node heartbeat to reserve reducer on node1
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  // node1 7 GB used and 3 GB reserved, node2 5 GB used
  assertEquals(3, schedulerApp1.getLiveContainers().size());
  assertEquals(1, schedulerApp1.getReservedContainers().size());
  assertEquals(15 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getUsed("x").getMemorySize());
  assertEquals(3 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getReserved("x").getMemorySize());
  assertEquals(15 * GB,
      leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize());
  assertEquals(3 * GB,
      leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize());
  // Do node heartbeat to allocate container for second reducer on node2
  // This should unreserve the reserved container
  cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  // Verify live on node2
  containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 5);
  checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
      "h2");
  // node1 7 GB used and 0 GB reserved, node2 8 GB used
  assertEquals(4, schedulerApp1.getLiveContainers().size());
  assertEquals(0, schedulerApp1.getReservedContainers().size());
  assertEquals(15 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getUsed("x").getMemorySize());
  assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getReserved("x").getMemorySize());
  assertEquals(15 * GB,
      leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize());
  assertEquals(0 * GB,
      leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize());
  rm1.close();
}
/**
 * Same scenario as testContainerReservationContinueLookingWithLabels, but the
 * ResourceRequests carry no label expression; the "x" partition is picked up
 * from the queue's default node label expression instead. Verifies that
 * continue-looking reservation/unreservation accounting is identical in that
 * configuration.
 */
@Test
@Timeout(value = 120)
public void testContainerReservationContinueLookingWithDefaultLabels()
    throws Exception {
  // This is the same as testContainerReservationContinueLookingWithLabels,
  // but this test doesn't specify the label expression in the
  // ResourceRequest, instead it uses default queue label expressions
  mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x"));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0),
      toSet("x"), NodeId.newInstance("h2", 0), toSet("x")));
  // inject node label manager
  MockRM rm1 = new MockRM(
      TestUtils.getConfigurationWithDefaultQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = x
  MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB); // label = x
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
  LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1");
  ContainerId containerId;
  // launch an app to queue a1 (label = x)
  MockRMAppSubmissionData data1 =
      MockRMAppSubmissionData.Builder.createWithMemory(2 * GB, rm1)
          .withAppName("app1")
          .withUser("user")
          .withAcls(null)
          .withQueue("a1")
          .withUnmanagedAM(false)
          .build();
  RMApp app1 = MockRMAppSubmitter.submit(rm1, data1);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1
      .getApplicationAttemptId());
  // Verify AM (2G) live on node1
  containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
  checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
      "h1");
  assertEquals(1, schedulerApp1.getLiveContainers().size());
  assertFalse(schedulerApp1.getReservedContainers().size() > 0);
  assertEquals(2 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getUsed("x").getMemorySize());
  assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getReserved("x").getMemorySize());
  assertEquals(2 * GB,
      leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize());
  assertEquals(0 * GB,
      leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize());
  // request map containers for app1 (no label; queue default applies).
  am1.allocate("*", 5 * GB, 2, 5, new ArrayList<ContainerId>(), null);
  // Do node heartbeat to allocate first mapper on node1
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  // Verify live on node1
  containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
  checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
      "h1");
  assertEquals(2, schedulerApp1.getLiveContainers().size());
  assertFalse(schedulerApp1.getReservedContainers().size() > 0);
  assertEquals(7 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getUsed("x").getMemorySize());
  assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getReserved("x").getMemorySize());
  assertEquals(7 * GB,
      leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize());
  assertEquals(0 * GB,
      leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize());
  // Do node heartbeat to allocate second mapper on node2
  cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  // Verify live on node2
  containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
  checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
      "h2");
  // node1 7 GB used, node2 5 GB used
  assertEquals(3, schedulerApp1.getLiveContainers().size());
  assertFalse(schedulerApp1.getReservedContainers().size() > 0);
  assertEquals(12 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getUsed("x").getMemorySize());
  assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getReserved("x").getMemorySize());
  assertEquals(12 * GB,
      leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize());
  assertEquals(0 * GB,
      leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize());
  // request reducer containers for app1 (no label; queue default applies).
  am1.allocate("*", 3 * GB, 2, 10, new ArrayList<ContainerId>(), null);
  // Do node heartbeat to reserve reducer on node1
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  // node1 7 GB used and 3 GB reserved, node2 5 GB used
  assertEquals(3, schedulerApp1.getLiveContainers().size());
  assertEquals(1, schedulerApp1.getReservedContainers().size());
  assertEquals(15 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getUsed("x").getMemorySize());
  assertEquals(3 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getReserved("x").getMemorySize());
  assertEquals(15 * GB,
      leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize());
  assertEquals(3 * GB,
      leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize());
  // Do node heartbeat to allocate container for second reducer on node2
  // This should unreserve the reserved container
  cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  // Verify live on node2
  containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 5);
  checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
      "h2");
  // node1 7 GB used and 0 GB reserved, node2 8 GB used
  assertEquals(4, schedulerApp1.getLiveContainers().size());
  assertEquals(0, schedulerApp1.getReservedContainers().size());
  assertEquals(15 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getUsed("x").getMemorySize());
  assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getReserved("x").getMemorySize());
  assertEquals(15 * GB,
      leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize());
  assertEquals(0 * GB,
      leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize());
  rm1.close();
}
/**
 * Regression test for an RMContainer leak: after an app with
 * partition-exclusivity-ignoring reserved containers is killed,
 * LeafQueue#ignorePartitionExclusivityRMContainers must be empty, otherwise
 * the reserved resource would leak.
 */
@Test
@Timeout(value = 120)
public void testRMContainerLeakInLeafQueue() throws Exception {
  // set node -> label
  mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x"));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"),
      NodeId.newInstance("h2", 0), toSet("x")));
  // inject node label manager
  MockRM rm1 =
      new MockRM(TestUtils.getConfigurationWithDefaultQueueLabels(conf)) {
        @Override public RMNodeLabelsManager createNodeLabelManager() {
          return mgr;
        }
      };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = x
  rm1.registerNode("h2:1234", 8 * GB); // label = x
  // launch an app to queue a1 (label = x), and check all container will
  // be allocated in h1
  MockRMAppSubmissionData data1 =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app1")
          .withUser("user")
          .withAcls(null)
          .withQueue("a1")
          .withUnmanagedAM(false)
          .build();
  RMApp app1 = MockRMAppSubmitter.submit(rm1, data1);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  MockRMAppSubmissionData data =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app2")
          .withUser("user")
          .withAcls(null)
          .withQueue("a1")
          .withUnmanagedAM(false)
          .build();
  RMApp app2 = MockRMAppSubmitter.submit(rm1, data);
  MockRM.launchAndRegisterAM(app2, rm1, nm1);
  // request two 7G containers; nm1 (8G) only has 6G free after the two AMs.
  am1.allocate("*", 7 * GB, 2, new ArrayList<ContainerId>());
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1");
  // Do node heartbeats 1 time
  // scheduler will reserve a container for app1
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  // app1 should have its AM live and one 7G container reserved on nm1
  FiCaSchedulerApp schedulerApp1 =
      cs.getApplicationAttempt(am1.getApplicationAttemptId());
  assertEquals(1, schedulerApp1.getLiveContainers().size());
  assertEquals(1, schedulerApp1.getReservedContainers().size());
  // kill app2 then do node heartbeat 1 time
  // scheduler will allocate a container from the reserved container on nm1
  rm1.killApp(app2.getApplicationId());
  rm1.waitForState(app2.getApplicationId(), RMAppState.KILLED);
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  assertEquals(2, schedulerApp1.getLiveContainers().size());
  assertEquals(0, schedulerApp1.getReservedContainers().size());
  // After kill app1, LeafQueue#ignorePartitionExclusivityRMContainers should
  // be clean, otherwise resource leak happened
  rm1.killApp(app1.getApplicationId());
  rm1.waitForState(app1.getApplicationId(), RMAppState.KILLED);
  assertEquals(0, leafQueue.getIgnoreExclusivityRMContainers().size());
  rm1.close();
}
/**
 * Asserts that the total pending memory of the "*" (ANY) resource request at
 * {@code priority} for the given attempt equals {@code memory}. Total pending
 * memory is the per-allocation memory size multiplied by the outstanding ask
 * count.
 */
private void checkPendingResource(MockRM rm, int priority,
    ApplicationAttemptId attemptId, int memory) {
  CapacityScheduler scheduler =
      (CapacityScheduler) rm.getRMContext().getScheduler();
  FiCaSchedulerApp attempt = scheduler.getApplicationAttempt(attemptId);
  PendingAsk pendingAsk = attempt.getAppSchedulingInfo()
      .getPendingAsk(TestUtils.toSchedulerKey(priority), "*");
  long totalPendingMem =
      pendingAsk.getPerAllocationResource().getMemorySize()
          * pendingAsk.getCount();
  assertEquals(memory, totalPendingMem);
}
/**
 * Asserts that the scheduler node for {@code nodeId} currently hosts exactly
 * {@code numContainers} launched containers.
 */
private void checkLaunchedContainerNumOnNode(MockRM rm, NodeId nodeId,
    int numContainers) {
  SchedulerNode schedulerNode =
      ((CapacityScheduler) rm.getRMContext().getScheduler())
          .getSchedulerNode(nodeId);
  assertEquals(numContainers, schedulerNode.getNumContainers());
}
/**
 * JIRA YARN-4140: in a ResourceRequest set, the node label expression is
 * specified only on the ANY (off-switch) request; RACK/NODE-local and
 * default-rack requests must pick it up from the ANY request. This test
 * verifies the label expression of the other requests is updated based on
 * the ANY request.
 *
 * @throws Exception on test failure
 */
@Test
public void testResourceRequestUpdateNodePartitions() throws Exception {
  // set node -> label
  mgr.addToCluserNodeLabels(ImmutableSet.of(NodeLabel.newInstance("x"),
      NodeLabel.newInstance("y", false), NodeLabel.newInstance("z", false)));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("y")));
  // inject node label manager
  MockRM rm1 = new MockRM(getConfigurationWithQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm2 = rm1.registerNode("h2:1234", 40 * GB); // label = y
  // launch an app to queue b1 (label = y), AM container should be launched in
  // nm2
  MockRMAppSubmissionData data =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("b1")
          .withUnmanagedAM(false)
          .build();
  RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);
  // Build a request set where the requests before ANY carry no label but the
  // ANY request carries label "y"; the others should inherit "y".
  List<ResourceRequest> resourceRequest = new ArrayList<ResourceRequest>();
  resourceRequest.add(am1.createResourceReq("/default-rack", 1024, 3, 1,
      RMNodeLabelsManager.NO_LABEL));
  resourceRequest.add(am1.createResourceReq("*", 1024, 3, 5, "y"));
  resourceRequest.add(am1.createResourceReq("h1:1234", 1024, 3, 2,
      RMNodeLabelsManager.NO_LABEL));
  resourceRequest.add(am1.createResourceReq("*", 1024, 2, 3, "y"));
  resourceRequest.add(am1.createResourceReq("h2:1234", 1024, 2, 4, null));
  resourceRequest.add(am1.createResourceReq("*", 1024, 4, 3, null));
  resourceRequest.add(am1.createResourceReq("h2:1234", 1024, 4, 4, null));
  am1.allocate(resourceRequest, new ArrayList<ContainerId>());
  CapacityScheduler cs =
      (CapacityScheduler) rm1.getRMContext().getScheduler();
  FiCaSchedulerApp app =
      cs.getApplicationAttempt(am1.getApplicationAttemptId());
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2, "y");
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3, "y");
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 4,
      RMNodeLabelsManager.NO_LABEL);
  // Previous ANY request was "y"; now update with "z" while the
  // requests before ANY carry a null label.
  List<ResourceRequest> newReq = new ArrayList<ResourceRequest>();
  newReq.add(am1.createResourceReq("h2:1234", 1024, 3, 4, null));
  newReq.add(am1.createResourceReq("*", 1024, 3, 5, "z"));
  newReq.add(am1.createResourceReq("h1:1234", 1024, 3, 4, null));
  newReq.add(am1.createResourceReq("*", 1024, 4, 5, "z"));
  am1.allocate(newReq, new ArrayList<ContainerId>());
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3, "z");
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 4, "z");
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2, "y");
  // Requests before ANY and the ANY request itself have a null label; the
  // requests should end up with the empty (NO_LABEL) partition.
  List<ResourceRequest> resourceRequest1 = new ArrayList<ResourceRequest>();
  resourceRequest1.add(am1.createResourceReq("/default-rack", 1024, 3, 1,
      null));
  resourceRequest1.add(am1.createResourceReq("*", 1024, 3, 5, null));
  resourceRequest1.add(am1.createResourceReq("h1:1234", 1024, 3, 2,
      RMNodeLabelsManager.NO_LABEL));
  resourceRequest1.add(am1.createResourceReq("/default-rack", 1024, 2, 1,
      null));
  resourceRequest1.add(am1.createResourceReq("*", 1024, 2, 3,
      RMNodeLabelsManager.NO_LABEL));
  resourceRequest1.add(am1.createResourceReq("h2:1234", 1024, 2, 4, null));
  am1.allocate(resourceRequest1, new ArrayList<ContainerId>());
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3,
      RMNodeLabelsManager.NO_LABEL);
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2,
      RMNodeLabelsManager.NO_LABEL);
  rm1.stop();
}
/**
 * Asserts that the primary requested node partition recorded for the
 * scheduler key(s) at {@code priority} equals {@code expectedPartition}.
 * Scheduler keys at other priorities are skipped.
 */
private void checkNodePartitionOfRequestedPriority(AppSchedulingInfo info,
    int priority, String expectedPartition) {
  for (SchedulerRequestKey schedulerKey : info.getSchedulerKeys()) {
    if (schedulerKey.getPriority().getPriority() != priority) {
      continue;
    }
    assertEquals(expectedPartition,
        info.getAppPlacementAllocator(schedulerKey)
            .getPrimaryRequestedNodePartition(),
        "Expected partition is " + expectedPartition);
  }
}
@Test
public void testPreferenceOfNeedyAppsTowardsNodePartitions() throws Exception {
  /**
   * Test case: Submit two application to a queue (app1 first then app2), app1
   * asked for no-label, app2 asked for label=x, when node1 has label=x
   * doing heart beat, app2 will get allocation first, even if app2 submits later
   * than app1
   */
  // set node -> label ("y" is non-exclusive; h1 gets label y)
  mgr.addToCluserNodeLabels(ImmutableSet.of(
      NodeLabel.newInstance("x"), NodeLabel.newInstance("y", false)));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("y")));
  // inject node label manager
  MockRM rm1 = new MockRM(TestUtils.getConfigurationWithQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = y
  MockNM nm2 = rm1.registerNode("h2:1234", 100 * GB); // label = <empty>
  // launch an app to queue b1 (label = y), AM container should be launched in nm2
  MockRMAppSubmissionData data1 =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("b1")
          .withUnmanagedAM(false)
          .build();
  RMApp app1 = MockRMAppSubmitter.submit(rm1, data1);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);
  // launch another app to queue b1 (label = y), AM container should be launched in nm2
  MockRMAppSubmissionData data =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("b1")
          .withUnmanagedAM(false)
          .build();
  RMApp app2 = MockRMAppSubmitter.submit(rm1, data);
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);
  // request container and nm1 do heartbeat (nm2 has label=y), note that app1
  // request non-labeled container, and app2 request labeled container, app2
  // will get allocated first even if app1 submitted first.
  am1.allocate("*", 1 * GB, 8, new ArrayList<ContainerId>());
  am2.allocate("*", 1 * GB, 8, new ArrayList<ContainerId>(), "y");
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
  // Do node heartbeats many times so all pending asks can be satisfied
  for (int i = 0; i < 50; i++) {
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  }
  // App2 will get preference to be allocated on node1, and node1 will be all
  // used by App2.
  FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1.getApplicationAttemptId());
  FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(am2.getApplicationAttemptId());
  // app1 get nothing in nm1 (partition=y); its 8 tasks + AM land on nm2
  checkNumOfContainersInAnAppOnGivenNode(0, nm1.getNodeId(), schedulerApp1);
  checkNumOfContainersInAnAppOnGivenNode(9, nm2.getNodeId(), schedulerApp1);
  // app2 get all resource in nm1 (partition=y); only its AM is on nm2
  checkNumOfContainersInAnAppOnGivenNode(8, nm1.getNodeId(), schedulerApp2);
  checkNumOfContainersInAnAppOnGivenNode(1, nm2.getNodeId(), schedulerApp2);
  rm1.close();
}
/**
 * Counts the app's live containers allocated on {@code nodeId} and asserts
 * the count equals {@code expectedNum}.
 */
private void checkNumOfContainersInAnAppOnGivenNode(int expectedNum,
    NodeId nodeId, FiCaSchedulerApp app) {
  long matched = app.getLiveContainers().stream()
      .filter(rmContainer -> rmContainer.getAllocatedNode().equals(nodeId))
      .count();
  assertEquals(expectedNum, matched);
}
@Test
public void
    testPreferenceOfNeedyPrioritiesUnderSameAppTowardsNodePartitions()
    throws Exception {
  /**
   * Test case: Submit one application, it asks label="" in priority=1 and
   * label="x" in priority=2, when a node with label=x heartbeat, priority=2
   * will get allocation first even if there're pending resource in priority=1
   */
  // set node -> label
  mgr.addToCluserNodeLabels(ImmutableSet.of(
      NodeLabel.newInstance("x"), NodeLabel.newInstance("y", false)));
  // Makes y to be non-exclusive node labels
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("y")));
  // inject node label manager
  MockRM rm1 = new MockRM(TestUtils.getConfigurationWithQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = y
  MockNM nm2 = rm1.registerNode("h2:1234", 100 * GB); // label = <empty>
  // launch an app to queue b1 (label = y), AM container should be launched in nm2
  MockRMAppSubmissionData data =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("b1")
          .withUnmanagedAM(false)
          .build();
  RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);
  // request containers from am1, priority=1 asks for "" and priority=2 asks
  // for "y", "y" container should be allocated first
  am1.allocate("*", 1 * GB, 1, 1, new ArrayList<ContainerId>(), "");
  am1.allocate("*", 1 * GB, 1, 2, new ArrayList<ContainerId>(), "y");
  // Do a node heartbeat once
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  cs.handle(new NodeUpdateSchedulerEvent(
      rm1.getRMContext().getRMNodes().get(nm1.getNodeId())));
  // Check pending resource for am1: priority=1 doesn't get allocated before
  // priority=2 allocated
  checkPendingResource(rm1, 1, am1.getApplicationAttemptId(), 1 * GB);
  checkPendingResource(rm1, 2, am1.getApplicationAttemptId(), 0 * GB);
  rm1.close();
}
@Test
public void testNonLabeledResourceRequestGetPreferrenceToNonLabeledNode()
    throws Exception {
  /**
   * Test case: Submit one application, it asks 6 label="" containers, NM1
   * with label=x and NM2 has no label, NM1/NM2 doing heartbeat together. Even
   * if NM1 has idle resource, containers are all allocated to NM2 since
   * non-labeled request should get allocation on non-labeled nodes first.
   */
  mgr.addToCluserNodeLabels(ImmutableSet.of(
      NodeLabel.newInstance("x", false), NodeLabel.newInstance("y")));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
  // inject node label manager
  MockRM rm1 = new MockRM(TestUtils.getConfigurationWithQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = x
  MockNM nm2 = rm1.registerNode("h2:1234", 100 * GB); // label = <empty>
  ContainerId nextContainerId;
  // launch an app to queue b1, AM container should be launched in nm2
  MockRMAppSubmissionData data =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("b1")
          .withUnmanagedAM(false)
          .build();
  RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);
  // request containers from am1, priority=1 asks for "" * 6 (id from 2 to 7),
  // nm1/nm2 do
  // heartbeat at the same time, check containers are always allocated to nm2.
  // This is to verify when there's resource available in non-labeled
  // partition, non-labeled resource should allocate to non-labeled partition
  // first.
  am1.allocate("*", 1 * GB, 6, 1, new ArrayList<ContainerId>(), "");
  for (int i = 2; i < 2 + 6; i++) {
    nextContainerId =
        ContainerId.newContainerId(am1.getApplicationAttemptId(), i);
    assertTrue(rm1.waitForState(Arrays.asList(nm1, nm2),
        nextContainerId, RMContainerState.ALLOCATED));
  }
  // no more container allocated on nm1
  checkLaunchedContainerNumOnNode(rm1, nm1.getNodeId(), 0);
  // all 7 (1 AM container + 6 task container) containers allocated on nm2
  checkLaunchedContainerNumOnNode(rm1, nm2.getNodeId(), 7);
  rm1.close();
}
@Test
public void testPreferenceOfQueuesTowardsNodePartitions()
    throws Exception {
  /**
   * Test case: have a following queue structure:
   *
   * <pre>
   *            root
   *         /   |   \
   *        a     b    c
   *       / \   / \  /  \
   *      a1 a2 b1 b2 c1 c2
   *     (x)    (x)   (x)
   * </pre>
   *
   * Only a1, b1, c1 can access label=x, and their default label=x Each each
   * has one application, asks for 5 containers. NM1 has label=x
   *
   * NM1/NM2 doing heartbeat for 15 times, it should allocate all 15
   * containers with label=x
   */
  CapacitySchedulerConfiguration csConf =
      new CapacitySchedulerConfiguration(this.conf);
  // Define top-level queues
  csConf.setQueues(ROOT, new String[] {"a", "b", "c"});
  csConf.setCapacityByLabel(ROOT, "x", 100);
  csConf.setCapacity(A, 33);
  csConf.setAccessibleNodeLabels(A, toSet("x"));
  csConf.setCapacityByLabel(A, "x", 33);
  csConf.setQueues(A, new String[] {"a1", "a2"});
  csConf.setCapacity(B, 33);
  csConf.setAccessibleNodeLabels(B, toSet("x"));
  csConf.setCapacityByLabel(B, "x", 33);
  csConf.setQueues(B, new String[] {"b1", "b2"});
  csConf.setCapacity(C, 34);
  csConf.setAccessibleNodeLabels(C, toSet("x"));
  csConf.setCapacityByLabel(C, "x", 34);
  csConf.setQueues(C, new String[] {"c1", "c2"});
  // Define 2nd-level queues: a1/b1/c1 own all of their parent's label=x
  // capacity and default to label=x; a2/b2/c2 get none of it.
  csConf.setCapacity(A1, 50);
  csConf.setCapacityByLabel(A1, "x", 100);
  csConf.setDefaultNodeLabelExpression(A1, "x");
  csConf.setCapacity(A2, 50);
  csConf.setCapacityByLabel(A2, "x", 0);
  csConf.setCapacity(B1, 50);
  csConf.setCapacityByLabel(B1, "x", 100);
  csConf.setDefaultNodeLabelExpression(B1, "x");
  csConf.setCapacity(B2, 50);
  csConf.setCapacityByLabel(B2, "x", 0);
  csConf.setCapacity(C1, 50);
  csConf.setCapacityByLabel(C1, "x", 100);
  csConf.setDefaultNodeLabelExpression(C1, "x");
  csConf.setCapacity(C2, 50);
  csConf.setCapacityByLabel(C2, "x", 0);
  // set node -> label
  mgr.addToCluserNodeLabels(ImmutableSet.of(
      NodeLabel.newInstance("x", false), NodeLabel.newInstance("y")));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
  // inject node label manager
  MockRM rm1 = new MockRM(csConf) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB); // label = x
  MockNM nm2 = rm1.registerNode("h2:1234", 100 * GB); // label = <empty>
  // app1 -> a1
  MockRMAppSubmissionData data5 =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("a1")
          .withUnmanagedAM(false)
          .build();
  RMApp app1 = MockRMAppSubmitter.submit(rm1, data5);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  // app2 -> a2
  MockRMAppSubmissionData data4 =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("a2")
          .withUnmanagedAM(false)
          .build();
  RMApp app2 = MockRMAppSubmitter.submit(rm1, data4);
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);
  // app3 -> b1
  MockRMAppSubmissionData data3 =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("b1")
          .withUnmanagedAM(false)
          .build();
  RMApp app3 = MockRMAppSubmitter.submit(rm1, data3);
  MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1);
  // app4 -> b2
  MockRMAppSubmissionData data2 =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("b2")
          .withUnmanagedAM(false)
          .build();
  RMApp app4 = MockRMAppSubmitter.submit(rm1, data2);
  MockAM am4 = MockRM.launchAndRegisterAM(app4, rm1, nm2);
  // app5 -> c1
  MockRMAppSubmissionData data1 =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("c1")
          .withUnmanagedAM(false)
          .build();
  RMApp app5 = MockRMAppSubmitter.submit(rm1, data1);
  MockAM am5 = MockRM.launchAndRegisterAM(app5, rm1, nm1);
  // app6 -> c2
  MockRMAppSubmissionData data =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("c2")
          .withUnmanagedAM(false)
          .build();
  RMApp app6 = MockRMAppSubmitter.submit(rm1, data);
  MockAM am6 = MockRM.launchAndRegisterAM(app6, rm1, nm2);
  // Each application request 5 * 1GB container
  am1.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>());
  am2.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>());
  am3.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>());
  am4.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>());
  am5.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>());
  am6.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>());
  // NM1 do 15 heartbeats
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  for (int i = 0; i < 15; i++) {
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  }
  // NM1 get 15 new containers (total is 18, 15 task containers and 3 AM
  // containers)
  checkLaunchedContainerNumOnNode(rm1, nm1.getNodeId(), 18);
  // Check pending resource each application
  // APP1/APP3/APP5 get satisfied, and APP2/APP4/APP6 get nothing.
  checkPendingResource(rm1, 1, am1.getApplicationAttemptId(), 0 * GB);
  checkPendingResource(rm1, 1, am2.getApplicationAttemptId(), 5 * GB);
  checkPendingResource(rm1, 1, am3.getApplicationAttemptId(), 0 * GB);
  checkPendingResource(rm1, 1, am4.getApplicationAttemptId(), 5 * GB);
  checkPendingResource(rm1, 1, am5.getApplicationAttemptId(), 0 * GB);
  checkPendingResource(rm1, 1, am6.getApplicationAttemptId(), 5 * GB);
  rm1.close();
}
  @Test
  public void testQueuesWithoutAccessUsingPartitionedNodes() throws Exception {
    /**
     * Test case: have a following queue structure:
     *
     * <pre>
     *            root
     *         /      \
     *        a        b
     *       (x)
     * </pre>
     *
     * Only a can access label=x, two nodes in the cluster, n1 has x and n2 has
     * no-label.
     *
     * When user-limit-factor=5, submit one application in queue b and request
     * for infinite containers should be able to use up all cluster resources
     * (b can only use the non-partitioned node, 10GB).
     */
    CapacitySchedulerConfiguration csConf =
        new CapacitySchedulerConfiguration(this.conf);

    // Define top-level queues
    csConf.setQueues(ROOT, new String[] {"a", "b"});
    csConf.setCapacityByLabel(ROOT, "x", 100);

    csConf.setCapacity(A, 50);
    csConf.setAccessibleNodeLabels(A, toSet("x"));
    csConf.setCapacityByLabel(A, "x", 100);

    csConf.setCapacity(B, 50);
    // b has no partition access; user-limit-factor=5 lets a single user in b
    // exceed b's configured share of the default partition.
    csConf.setAccessibleNodeLabels(B, new HashSet<String>());
    csConf.setUserLimitFactor(B, 5);

    // set node -> label
    mgr.addToCluserNodeLabels(ImmutableSet.of(
        NodeLabel.newInstance("x", false), NodeLabel.newInstance("y")));
    mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));

    // inject node label manager
    MockRM rm1 = new MockRM(csConf) {
      @Override
      public RMNodeLabelsManager createNodeLabelManager() {
        return mgr;
      }
    };

    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB); // label = x
    MockNM nm2 = rm1.registerNode("h2:1234", 10 * GB); // label = <empty>

    // app1 -> b
    MockRMAppSubmissionData data =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("b")
            .withUnmanagedAM(false)
            .build();
    RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);

    // Each application request 50 * 1GB container
    am1.allocate("*", 1 * GB, 50, new ArrayList<ContainerId>());

    // Heartbeat both nodes 50 times; only nm2 (no-label) should ever
    // satisfy app1's requests directly; nm1 allocations are non-exclusive.
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
    RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());

    SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId());

    // How much cycles we waited to be allocated when available resource only on
    // partitioned node
    int cycleWaited = 0;
    for (int i = 0; i < 50; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
      cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
      if (schedulerNode1.getNumContainers() == 0) {
        cycleWaited++;
      }
    }
    // We will wait 10 cycles before getting allocated on the partitioned node:
    // NM2 can allocate 10 containers totally, exclude already allocated AM
    // container, we will wait 9 to fulfill non-partitioned node, and need wait
    // one more cycle before allocating to non-partitioned node
    assertEquals(10, cycleWaited);

    // Both NM1/NM2 launched 10 containers, cluster resource is exhausted
    checkLaunchedContainerNumOnNode(rm1, nm1.getNodeId(), 10);
    checkLaunchedContainerNumOnNode(rm1, nm2.getNodeId(), 10);

    rm1.close();
  }
  @Test
  public void testAMContainerAllocationWillAlwaysBeExclusive()
      throws Exception {
    /**
     * Test case: Submit one application without partition, trying to allocate a
     * node has partition=x, it should fail to allocate since AM container will
     * always respect exclusivity for partitions
     */

    // set node -> label
    mgr.addToCluserNodeLabels(ImmutableSet.of(
        NodeLabel.newInstance("x", false), NodeLabel.newInstance("y")));
    mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));

    // inject node label manager
    MockRM rm1 = new MockRM(TestUtils.getConfigurationWithQueueLabels(conf)) {
      @Override
      public RMNodeLabelsManager createNodeLabelManager() {
        return mgr;
      }
    };

    rm1.getRMContext().setNodeLabelManager(mgr);
    Resource resource = Resource.newInstance(8 * GB, 8);
    // Give the default (empty) partition nominal resource so the app is
    // accepted even though no no-label node exists.
    ((NullRMNodeLabelsManager)mgr).setResourceForLabel(CommonNodeLabelsManager.NO_LABEL, resource);
    rm1.start();
    String nodeIdStr = "h1:1234";
    MockNM nm1 = rm1.registerNode(nodeIdStr, resource); // label = x

    // Submit an app to queue b1; the only node has partition=x and the AM
    // request carries no partition, so the AM container must never land on it
    // even in non-exclusive ("ignore exclusivity") mode.
    MockRMAppSubmissionData data =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("b1")
            .withUnmanagedAM(false)
            .build();
    RMApp app = MockRMAppSubmitter.submit(rm1, data);

    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());

    // Heartbeat for many times, app1 should get nothing
    for (int i = 0; i < 50; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    }

    // The skip reason and the last processed node must both appear in the
    // app's diagnostics so operators can see why the AM is stuck.
    assertTrue(app.getDiagnostics().toString().contains(
        CSAMContainerLaunchDiagnosticsConstants.SKIP_AM_ALLOCATION_IN_IGNORE_EXCLUSIVE_MODE),
        "Scheduler diagnostics should have reason for not assigning the node");

    assertTrue(app.getDiagnostics().toString().contains(
        CSAMContainerLaunchDiagnosticsConstants.LAST_NODE_PROCESSED_MSG
            + nodeIdStr + " ( Partition : [x]"),
        "Scheduler diagnostics should have last processed node information");

    assertEquals(0, cs.getSchedulerNode(nm1.getNodeId())
        .getNumContainers());

    rm1.close();
  }
  @Test
  @Timeout(value = 60)
  public void
      testQueueMaxCapacitiesWillNotBeHonoredWhenNotRespectingExclusivity()
          throws Exception {
    /**
     * Test case: have a following queue structure:
     *
     * <pre>
     *            root
     *         /      \
     *        a        b
     *       (x)      (x)
     * </pre>
     *
     * a/b can access x, both of them has max-capacity-on-x = 50
     *
     * When doing non-exclusive allocation, app in a (or b) can use 100% of x
     * resource.
     */
    CapacitySchedulerConfiguration csConf =
        new CapacitySchedulerConfiguration(this.conf);

    // Define top-level queues
    csConf.setQueues(ROOT, new String[] {"a", "b"});
    csConf.setCapacityByLabel(ROOT, "x", 100);

    csConf.setCapacity(A, 50);
    csConf.setAccessibleNodeLabels(A, toSet("x"));
    csConf.setCapacityByLabel(A, "x", 50);
    csConf.setMaximumCapacityByLabel(A, "x", 50);
    csConf.setUserLimit(A, 200);

    csConf.setCapacity(B, 50);
    csConf.setAccessibleNodeLabels(B, toSet("x"));
    csConf.setCapacityByLabel(B, "x", 50);
    csConf.setMaximumCapacityByLabel(B, "x", 50);
    csConf.setUserLimit(B, 200);

    // set node -> label; "x" is non-exclusive so idle partitioned resource
    // can be shared with requests carrying no partition.
    mgr.addToCluserNodeLabels(ImmutableSet.of(
        NodeLabel.newInstance("x", false)));
    mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));

    // inject node label manager
    MockRM rm1 = new MockRM(csConf) {
      @Override
      public RMNodeLabelsManager createNodeLabelManager() {
        return mgr;
      }
    };

    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB); // label = x
    MockNM nm2 = rm1.registerNode("h2:1234", 10 * GB); // label = <empty>

    // app1 -> a
    MockRMAppSubmissionData data =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("a")
            .withUnmanagedAM(false)
            .build();
    RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);

    // app1 asks for 10 partition= containers (default partition)
    am1.allocate("*", 1 * GB, 10, new ArrayList<ContainerId>());

    // NM1 do 50 heartbeats
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());

    SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId());

    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    for (int i = 0; i < 50; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    }

    // app1 gets all resource in partition=x, exceeding a's
    // max-capacity-on-x=50 because the allocation ignored exclusivity.
    assertEquals(10, schedulerNode1.getNumContainers());

    // check non-exclusive containers of LeafQueue is correctly updated
    LeafQueue leafQueue = (LeafQueue) cs.getQueue("a");
    assertFalse(leafQueue.getIgnoreExclusivityRMContainers().containsKey(
        "y"));
    assertEquals(10,
        leafQueue.getIgnoreExclusivityRMContainers().get("x").size());

    // completes all containers of app1, ignoreExclusivityRMContainers should be
    // updated as well.
    cs.handle(new AppAttemptRemovedSchedulerEvent(
        am1.getApplicationAttemptId(), RMAppAttemptState.FINISHED, false));
    assertFalse(leafQueue.getIgnoreExclusivityRMContainers().containsKey(
        "x"));

    rm1.close();
  }
private void checkQueueUsedCapacity(String queueName, CapacityScheduler cs,
String nodePartition, float usedCapacity, float absoluteUsedCapacity) {
float epsilon = 1e-6f;
CSQueue queue = cs.getQueue(queueName);
assertNotNull(queue, "Failed to get queue=" + queueName);
assertEquals(usedCapacity, queue.getQueueCapacities()
.getUsedCapacity(nodePartition), epsilon);
assertEquals(absoluteUsedCapacity, queue.getQueueCapacities()
.getAbsoluteUsedCapacity(nodePartition), epsilon);
}
private void doNMHeartbeat(MockRM rm, NodeId nodeId, int nHeartbeat) {
CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nodeId);
for (int i = 0; i < nHeartbeat; i++) {
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
}
}
private void waitSchedulerNodeJoined(MockRM rm, int expectedNodeNum)
throws InterruptedException {
int totalWaitTick = 100; // wait 10 sec at most.
while (expectedNodeNum > rm.getResourceScheduler().getNumClusterNodes()
&& totalWaitTick > 0) {
Thread.sleep(100);
totalWaitTick--;
}
}
private void waitSchedulerNodeHasUpdatedLabels(CapacityScheduler cs,
MockNM nm, String partition) throws InterruptedException {
FiCaSchedulerNode node = cs.getNode(nm.getNodeId());
int totalWaitTick = 20; // wait 2 sec at most.
while (!node.getLabels().contains(partition)
&& totalWaitTick > 0) {
Thread.sleep(100);
totalWaitTick--;
}
}
  @Test
  public void testQueueUsedCapacitiesUpdate()
      throws Exception {
    /**
     * Test case: have a following queue structure:
     *
     * <pre>
     *            root
     *         /      \
     *        a        b
     *      /  \      (x)
     *     a1   a2
     *    (x)  (x)
     * </pre>
     *
     * Both a/b can access x, we need to verify when
     * <pre>
     * 1) container allocated/released in both partitioned/non-partitioned node,
     * 2) clusterResource updates
     * 3) queue guaranteed resource changed
     * </pre>
     *
     * used capacity / absolute used capacity of queues are correctly updated.
     */
    CapacitySchedulerConfiguration csConf =
        new CapacitySchedulerConfiguration(this.conf);

    // Define top-level queues
    csConf.setQueues(ROOT, new String[] {"a", "b"});
    csConf.setCapacityByLabel(ROOT, "x", 100);

    /**
     * Initially, we set A/B's resource 50:50
     */
    csConf.setCapacity(A, 50);
    csConf.setAccessibleNodeLabels(A, toSet("x"));
    csConf.setCapacityByLabel(A, "x", 50);

    csConf.setQueues(A, new String[] { "a1", "a2" });

    csConf.setCapacity(A1, 50);
    csConf.setAccessibleNodeLabels(A1, toSet("x"));
    csConf.setCapacityByLabel(A1, "x", 50);

    csConf.setCapacity(A2, 50);
    csConf.setAccessibleNodeLabels(A2, toSet("x"));
    csConf.setCapacityByLabel(A2, "x", 50);

    csConf.setCapacity(B, 50);
    csConf.setAccessibleNodeLabels(B, toSet("x"));
    csConf.setCapacityByLabel(B, "x", 50);

    // set node -> label
    mgr.addToCluserNodeLabels(ImmutableSet.of(
        NodeLabel.newInstance("x", false)));
    mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));

    // inject node label manager
    MockRM rm = new MockRM(csConf) {
      @Override
      public RMNodeLabelsManager createNodeLabelManager() {
        return mgr;
      }
    };

    rm.getRMContext().setNodeLabelManager(mgr);
    rm.start();

    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();

    /*
     * Before we adding any node to the cluster, used-capacity/abs-used-capacity
     * should be 0
     */
    checkQueueUsedCapacity("a", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("a", cs, "", 0f, 0f);
    checkQueueUsedCapacity("a1", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("a1", cs, "", 0f, 0f);
    checkQueueUsedCapacity("a2", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("a2", cs, "", 0f, 0f);
    checkQueueUsedCapacity("b", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("b", cs, "", 0f, 0f);
    checkQueueUsedCapacity("root", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("root", cs, "", 0f, 0f);

    MockNM nm1 = rm.registerNode("h1:1234", 10 * GB); // label = x
    MockNM nm2 = rm.registerNode("h2:1234", 10 * GB); // label = <empty>

    /*
     * After we adding nodes to the cluster, and before starting to use them,
     * used-capacity/abs-used-capacity should be 0
     */
    checkQueueUsedCapacity("a", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("a", cs, "", 0f, 0f);
    checkQueueUsedCapacity("a1", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("a1", cs, "", 0f, 0f);
    checkQueueUsedCapacity("a2", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("a2", cs, "", 0f, 0f);
    checkQueueUsedCapacity("b", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("b", cs, "", 0f, 0f);
    checkQueueUsedCapacity("root", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("root", cs, "", 0f, 0f);

    // app1 -> a1
    MockRMAppSubmissionData data1 =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("a1")
            .withUnmanagedAM(false)
            .build();
    RMApp app1 = MockRMAppSubmitter.submit(rm, data1);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);

    // app1 asks for 1 container in the default (empty) partition
    am1.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>());

    doNMHeartbeat(rm, nm2.getNodeId(), 10);

    // Now check usage, app1 uses 2GB (AM + 1 task) of the 10GB default
    // partition; a1's guarantee there is 2.5GB:
    // a1: used(no-label) = 80%
    //     abs-used(no-label) = 20%
    // a: used(no-label) = 40%
    //    abs-used(no-label) = 20%
    // root: used(no-label) = 20%
    //       abs-used(no-label) = 20%
    checkQueueUsedCapacity("a", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("a", cs, "", 0.4f, 0.2f);
    checkQueueUsedCapacity("a1", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("a1", cs, "", 0.8f, 0.2f);
    checkQueueUsedCapacity("a2", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("a2", cs, "", 0f, 0f);
    checkQueueUsedCapacity("b", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("b", cs, "", 0f, 0f);
    checkQueueUsedCapacity("root", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("root", cs, "", 0.2f, 0.2f);

    // app1 asks for 2 partition=x containers
    am1.allocate("*", 1 * GB, 2, new ArrayList<ContainerId>(), "x");

    doNMHeartbeat(rm, nm1.getNodeId(), 10);

    // Now check usage, app1 uses:
    // a1: used(x) = 80%
    //     abs-used(x) = 20%
    // a: used(x) = 40%
    //    abs-used(x) = 20%
    // root: used(x) = 20%
    //       abs-used(x) = 20%
    checkQueueUsedCapacity("a", cs, "x", 0.4f, 0.2f);
    checkQueueUsedCapacity("a", cs, "", 0.4f, 0.2f);
    checkQueueUsedCapacity("a1", cs, "x", 0.8f, 0.2f);
    checkQueueUsedCapacity("a1", cs, "", 0.8f, 0.2f);
    checkQueueUsedCapacity("a2", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("a2", cs, "", 0f, 0f);
    checkQueueUsedCapacity("b", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("b", cs, "", 0f, 0f);
    checkQueueUsedCapacity("root", cs, "x", 0.2f, 0.2f);
    checkQueueUsedCapacity("root", cs, "", 0.2f, 0.2f);

    // submit an app to a2, uses 1 NON_PARTITIONED container and 1 PARTITIONED
    // container
    // app2 -> a2
    MockRMAppSubmissionData data =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("a2")
            .withUnmanagedAM(false)
            .build();
    RMApp app2 = MockRMAppSubmitter.submit(rm, data);
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm2);

    // app2 asks for 1 partition=x container
    am2.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>(), "x");

    doNMHeartbeat(rm, nm1.getNodeId(), 10);

    // app2 adds 1GB on partition=x, on top of app1's 2GB; the checks below
    // assert the cumulative usage (app2's own share: a2 used(x)=40%,
    // abs-used(x)=10%):
    checkQueueUsedCapacity("a", cs, "x", 0.6f, 0.3f);
    checkQueueUsedCapacity("a", cs, "", 0.6f, 0.3f);
    checkQueueUsedCapacity("a1", cs, "x", 0.8f, 0.2f);
    checkQueueUsedCapacity("a1", cs, "", 0.8f, 0.2f);
    checkQueueUsedCapacity("a2", cs, "x", 0.4f, 0.1f);
    checkQueueUsedCapacity("a2", cs, "", 0.4f, 0.1f);
    checkQueueUsedCapacity("b", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("b", cs, "", 0f, 0f);
    checkQueueUsedCapacity("root", cs, "x", 0.3f, 0.3f);
    checkQueueUsedCapacity("root", cs, "", 0.3f, 0.3f);

    // Add nm3/nm4, double resource for both partitioned/non-partitioned
    // resource, used capacity should be 1/2 of before
    mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h3", 0), toSet("x")));
    MockNM nm3 = rm.registerNode("h3:1234", 10 * GB); // label = x
    MockNM nm4 = rm.registerNode("h4:1234", 10 * GB); // label = <empty>

    // wait until the scheduler has seen both new nodes and their labels,
    // otherwise the capacity checks below would race the registration
    waitSchedulerNodeJoined(rm, 4);
    waitSchedulerNodeHasUpdatedLabels(cs, nm3, "x");
    waitSchedulerNodeHasUpdatedLabels(cs, nm4, "");

    checkQueueUsedCapacity("a", cs, "x", 0.3f, 0.15f);
    checkQueueUsedCapacity("a", cs, "", 0.3f, 0.15f);
    checkQueueUsedCapacity("a1", cs, "x", 0.4f, 0.1f);
    checkQueueUsedCapacity("a1", cs, "", 0.4f, 0.1f);
    checkQueueUsedCapacity("a2", cs, "x", 0.2f, 0.05f);
    checkQueueUsedCapacity("a2", cs, "", 0.2f, 0.05f);
    checkQueueUsedCapacity("b", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("b", cs, "", 0f, 0f);
    checkQueueUsedCapacity("root", cs, "x", 0.15f, 0.15f);
    checkQueueUsedCapacity("root", cs, "", 0.15f, 0.15f);

    // Reinitialize queue, makes A's capacity double, and B's capacity to be 0
    csConf.setCapacity(A, 100); // was 50
    csConf.setCapacityByLabel(A, "x", 100); // was 50
    csConf.setCapacity(B, 0); // was 50
    csConf.setCapacityByLabel(B, "x", 0); // was 50
    cs.reinitialize(csConf, rm.getRMContext());

    // A's guarantee doubled, so its used capacity halves; absolute used
    // capacity is unchanged by the guarantee change.
    checkQueueUsedCapacity("a", cs, "x", 0.15f, 0.15f);
    checkQueueUsedCapacity("a", cs, "", 0.15f, 0.15f);
    checkQueueUsedCapacity("a1", cs, "x", 0.2f, 0.1f);
    checkQueueUsedCapacity("a1", cs, "", 0.2f, 0.1f);
    checkQueueUsedCapacity("a2", cs, "x", 0.1f, 0.05f);
    checkQueueUsedCapacity("a2", cs, "", 0.1f, 0.05f);
    checkQueueUsedCapacity("b", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("b", cs, "", 0f, 0f);
    checkQueueUsedCapacity("root", cs, "x", 0.15f, 0.15f);
    checkQueueUsedCapacity("root", cs, "", 0.15f, 0.15f);

    // Release all task containers from a1 (container ids 2-4), check usage
    am1.allocate(null, Arrays.asList(
        ContainerId.newContainerId(am1.getApplicationAttemptId(), 2),
        ContainerId.newContainerId(am1.getApplicationAttemptId(), 3),
        ContainerId.newContainerId(am1.getApplicationAttemptId(), 4)));
    checkQueueUsedCapacity("a", cs, "x", 0.05f, 0.05f);
    checkQueueUsedCapacity("a", cs, "", 0.10f, 0.10f);
    checkQueueUsedCapacity("a1", cs, "x", 0.0f, 0.0f);
    checkQueueUsedCapacity("a1", cs, "", 0.1f, 0.05f);
    checkQueueUsedCapacity("a2", cs, "x", 0.1f, 0.05f);
    checkQueueUsedCapacity("a2", cs, "", 0.1f, 0.05f);
    checkQueueUsedCapacity("b", cs, "x", 0f, 0f);
    checkQueueUsedCapacity("b", cs, "", 0f, 0f);
    checkQueueUsedCapacity("root", cs, "x", 0.05f, 0.05f);
    checkQueueUsedCapacity("root", cs, "", 0.10f, 0.10f);

    rm.close();
  }
  @Test
  public void testOrderOfAllocationOnPartitions()
      throws Exception {
    /**
     * Test case: have a following queue structure:
     *
     * <pre>
     *                root
     *         ________________
     *        /     |     \    \
     *       a (x)  b (x)  c    d
     * </pre>
     *
     * Both a/b can access x, we need to verify when
     * <pre>
     * When doing allocation on partitioned nodes,
     *    - Queue has accessibility to the node will go first
     *    - When accessibility is same
     *      - Queue has less used_capacity on given partition will go first
     *      - When used_capacity is same
     *        - Queue has more abs_capacity will go first
     * </pre>
     *
     * used capacity / absolute used capacity of queues are correctly updated.
     */
    CapacitySchedulerConfiguration csConf =
        new CapacitySchedulerConfiguration(this.conf);

    // Define top-level queues
    csConf.setQueues(ROOT, new String[] {"a", "b", "c", "d"});
    csConf.setCapacityByLabel(ROOT, "x", 100);

    csConf.setCapacity(A, 25);
    csConf.setAccessibleNodeLabels(A, toSet("x"));
    csConf.setCapacityByLabel(A, "x", 30);

    csConf.setCapacity(B, 25);
    csConf.setAccessibleNodeLabels(B, toSet("x"));
    csConf.setCapacityByLabel(B, "x", 70);

    // c and d explicitly cannot access partition x
    csConf.setAccessibleNodeLabels(C, Collections.<String> emptySet());
    csConf.setCapacity(C, 25);

    csConf.setAccessibleNodeLabels(D, Collections.<String> emptySet());
    csConf.setCapacity(D, 25);

    // set node -> label; "x" is non-exclusive
    mgr.addToCluserNodeLabels(ImmutableSet.of(
        NodeLabel.newInstance("x", false)));
    mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));

    // inject node label manager
    MockRM rm = new MockRM(csConf) {
      @Override
      public RMNodeLabelsManager createNodeLabelManager() {
        return mgr;
      }
    };

    rm.getRMContext().setNodeLabelManager(mgr);
    rm.start();
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();

    MockNM nm1 = rm.registerNode("h1:1234", 10 * GB); // label = x
    MockNM nm2 = rm.registerNode("h2:1234", 10 * GB); // label = <empty>

    // All four AMs are launched on nm2 so nm1 (partition=x) stays free for
    // the ordering checks below.
    // app1 -> a
    MockRMAppSubmissionData data3 =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("a")
            .withUnmanagedAM(false)
            .build();
    RMApp app1 = MockRMAppSubmitter.submit(rm, data3);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);

    // app2 -> b
    MockRMAppSubmissionData data2 =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("b")
            .withUnmanagedAM(false)
            .build();
    RMApp app2 = MockRMAppSubmitter.submit(rm, data2);
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm2);

    // app3 -> c
    MockRMAppSubmissionData data1 =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("c")
            .withUnmanagedAM(false)
            .build();
    RMApp app3 = MockRMAppSubmitter.submit(rm, data1);
    MockAM am3 = MockRM.launchAndRegisterAM(app3, rm, nm2);

    // app4 -> d
    MockRMAppSubmissionData data =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("d")
            .withUnmanagedAM(false)
            .build();
    RMApp app4 = MockRMAppSubmitter.submit(rm, data);
    MockAM am4 = MockRM.launchAndRegisterAM(app4, rm, nm2);

    // Test case 1
    // Both a/b has used_capacity(x) = 0, when doing exclusive allocation, b
    // will go first since b has more capacity(x)
    am1.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>(), "x");
    am2.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>(), "x");
    doNMHeartbeat(rm, nm1.getNodeId(), 1);
    checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(),
        cs.getApplicationAttempt(am2.getApplicationAttemptId()));

    // Test case 2
    // Do another allocation, a will go first since it has 0 use_capacity(x) and
    // b has 1/7 used_capacity(x)
    am2.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>(), "x");
    doNMHeartbeat(rm, nm1.getNodeId(), 1);
    checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(),
        cs.getApplicationAttempt(am1.getApplicationAttemptId()));

    // Test case 3
    // Just like above, when doing non-exclusive allocation, b will go first as well.
    am1.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>(), "");
    am2.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>(), "");
    doNMHeartbeat(rm, nm1.getNodeId(), 2);
    checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(),
        cs.getApplicationAttempt(am2.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(),
        cs.getApplicationAttempt(am1.getApplicationAttemptId()));

    // Test case 4
    // After b allocated, we should be able to allocate non-exclusive container in a
    doNMHeartbeat(rm, nm1.getNodeId(), 2);
    checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(),
        cs.getApplicationAttempt(am2.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(),
        cs.getApplicationAttempt(am1.getApplicationAttemptId()));

    // Test case 5
    // b/c/d asks non-exclusive container together, b will go first regardless
    // of used_capacity(x) because b can access the partition and c/d cannot
    am2.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>(), "");
    am3.allocate("*", 1 * GB, 2, new ArrayList<ContainerId>(), "");
    am4.allocate("*", 1 * GB, 2, new ArrayList<ContainerId>(), "");
    doNMHeartbeat(rm, nm1.getNodeId(), 2);
    checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(),
        cs.getApplicationAttempt(am1.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(3, nm1.getNodeId(),
        cs.getApplicationAttempt(am2.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(0, nm1.getNodeId(),
        cs.getApplicationAttempt(am3.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(0, nm1.getNodeId(),
        cs.getApplicationAttempt(am4.getApplicationAttemptId()));

    // Test case 6
    // After b allocated, c will go first by lexicographic order
    doNMHeartbeat(rm, nm1.getNodeId(), 1);
    checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(),
        cs.getApplicationAttempt(am1.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(3, nm1.getNodeId(),
        cs.getApplicationAttempt(am2.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(),
        cs.getApplicationAttempt(am3.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(0, nm1.getNodeId(),
        cs.getApplicationAttempt(am4.getApplicationAttemptId()));

    // Test case 7
    // After c allocated, d will go first because it has less used_capacity(x)
    // than c
    doNMHeartbeat(rm, nm1.getNodeId(), 1);
    checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(),
        cs.getApplicationAttempt(am1.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(3, nm1.getNodeId(),
        cs.getApplicationAttempt(am2.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(),
        cs.getApplicationAttempt(am3.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(),
        cs.getApplicationAttempt(am4.getApplicationAttemptId()));

    // Test case 8
    // After d allocated, c will go first, c/d has same use_capacity(x), so compare c/d's lexicographic order
    doNMHeartbeat(rm, nm1.getNodeId(), 1);
    checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(),
        cs.getApplicationAttempt(am1.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(3, nm1.getNodeId(),
        cs.getApplicationAttempt(am2.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(),
        cs.getApplicationAttempt(am3.getApplicationAttemptId()));
    checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(),
        cs.getApplicationAttempt(am4.getApplicationAttemptId()));

    rm.stop();
  }
  @Test
  public void testOrderOfAllocationOnPartitionsWhenAccessibilityIsAll()
      throws Exception {
    /**
     * Test case: have a following queue structure:
     *
     * <pre>
     *           root
     *        __________
     *       /          \
     *      a (*)       b (x)
     * </pre>
     *
     * Both queues a/b can access x, we need to verify whether * accessibility
     * is considered in ordering of queues
     */
    CapacitySchedulerConfiguration csConf =
        new CapacitySchedulerConfiguration(this.conf);

    // Define top-level queues
    csConf.setQueues(ROOT,
        new String[] { "a", "b" });
    csConf.setCapacityByLabel(ROOT, "x", 100);

    // a accesses every partition via the "*" wildcard and has the larger
    // capacity on x (60 vs 40)
    csConf.setCapacity(A, 25);
    csConf.setAccessibleNodeLabels(A, toSet("*"));
    csConf.setCapacityByLabel(A, "x", 60);

    csConf.setCapacity(B, 75);
    csConf.setAccessibleNodeLabels(B, toSet("x"));
    csConf.setCapacityByLabel(B, "x", 40);

    // set node -> label
    mgr.addToCluserNodeLabels(
        ImmutableSet.of(NodeLabel.newInstance("x", false)));
    mgr.addLabelsToNode(
        ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));

    // inject node label manager
    MockRM rm = new MockRM(csConf) {
      @Override
      public RMNodeLabelsManager createNodeLabelManager() {
        return mgr;
      }
    };

    rm.getRMContext().setNodeLabelManager(mgr);
    rm.start();
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();

    MockNM nm1 = rm.registerNode("h1:1234", 10 * GB); // label = x

    // app1 -> a, AM itself runs on partition x
    MockRMAppSubmissionData data1 =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("a")
            .withAmLabel("x")
            .build();
    RMApp app1 = MockRMAppSubmitter.submit(rm, data1);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);

    // app2 -> b, AM also on partition x
    MockRMAppSubmissionData data =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("b")
            .withAmLabel("x")
            .build();
    RMApp app2 = MockRMAppSubmitter.submit(rm, data);
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1);

    // Both a/b has used_capacity(x) = 0, when doing exclusive allocation, a
    // will go first since a has more capacity(x) — the "*" accessibility must
    // rank the same as explicit "x" accessibility.
    am1.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>(), "x");
    am2.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>(), "x");
    doNMHeartbeat(rm, nm1.getNodeId(), 1);
    // 2 = AM container + the newly allocated task container
    checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(),
        cs.getApplicationAttempt(am1.getApplicationAttemptId()));

    rm.stop();
  }
@Test
public void testParentQueueMaxCapsAreRespected() throws Exception {
/*
* Queue tree:
* Root
* / \
* A B
* / \
* A1 A2
*
* A has 50% capacity and 50% max capacity (of label=x)
* A1/A2 has 50% capacity and 100% max capacity (of label=x)
* Cluster has one node (label=x) with resource = 24G.
* So we can at most use 12G resources under queueA.
*/
CapacitySchedulerConfiguration csConf =
new CapacitySchedulerConfiguration(this.conf);
// Define top-level queues
csConf.setQueues(ROOT, new String[] {"a", "b"});
csConf.setCapacityByLabel(ROOT, "x", 100);
csConf.setCapacity(A, 10);
csConf.setAccessibleNodeLabels(A, toSet("x"));
csConf.setCapacityByLabel(A, "x", 50);
csConf.setMaximumCapacityByLabel(A, "x", 50);
csConf.setCapacity(B, 90);
csConf.setAccessibleNodeLabels(B, toSet("x"));
csConf.setCapacityByLabel(B, "x", 50);
csConf.setMaximumCapacityByLabel(B, "x", 50);
// Define 2nd-level queues
csConf.setQueues(A, new String[] { "a1",
"a2"});
csConf.setCapacity(A1, 50);
csConf.setAccessibleNodeLabels(A1, toSet("x"));
csConf.setCapacityByLabel(A1, "x", 50);
csConf.setMaximumCapacityByLabel(A1, "x", 100);
csConf.setUserLimitFactor(A1, 100.0f);
csConf.setCapacity(A2, 50);
csConf.setAccessibleNodeLabels(A2, toSet("x"));
csConf.setCapacityByLabel(A2, "x", 50);
csConf.setMaximumCapacityByLabel(A2, "x", 100);
csConf.setUserLimitFactor(A2, 100.0f);
// set node -> label
mgr.addToCluserNodeLabels(ImmutableSet.of(
NodeLabel.newInstance("x", false)));
mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
// inject node label manager
MockRM rm = new MockRM(csConf) {
@Override
public RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm.getRMContext().setNodeLabelManager(mgr);
rm.start();
CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
MockNM nm1 =
new MockNM("h1:1234", 24 * GB, rm.getResourceTrackerService());
nm1.registerNode();
// Launch app1 in a1, resource usage is 1GB (am) + 4GB * 2 = 9GB
MockRMAppSubmissionData data1 =
MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withAmLabel("x")
.build();
RMApp app1 = MockRMAppSubmitter.submit(rm, data1);
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
am1.allocate("*", 4 * GB, 2, new ArrayList<ContainerId>(), "x");
doNMHeartbeat(rm, nm1.getNodeId(), 10);
checkNumOfContainersInAnAppOnGivenNode(3, nm1.getNodeId(),
cs.getApplicationAttempt(am1.getApplicationAttemptId()));
// Try to launch app2 in a2, asked 2GB, should success
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(2 * GB, rm)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a2")
.withAmLabel("x")
.build();
RMApp app2 = MockRMAppSubmitter.submit(rm, data);
MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1);
// am2 asks more resources, cannot success because current used = 9G (app1)
// + 2G (app2) = 11G, and queue's max capacity = 12G
am2.allocate("*", 2 * GB, 2, new ArrayList<ContainerId>(), "x");
doNMHeartbeat(rm, nm1.getNodeId(), 10);
checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(),
cs.getApplicationAttempt(am2.getApplicationAttemptId()));
rm.stop();
}
  @Test
  public void testQueueMetricsWithLabels() throws Exception {
    /**
     * Test case: have a following queue structure:
     *
     * <pre>
     *       root
     *      /    \
     *     a      b
     *    (x)    (x)
     * </pre>
     *
     * a/b can access x, both of them has max-capacity-on-x = 50.
     *
     * Every node in this test carries a label (x or y), so the plain
     * QueueMetrics — which only track the DEFAULT partition — must stay at
     * zero even while containers are being allocated on partition x.
     */
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(
        this.conf);
    // Define top-level queues
    csConf.setQueues(ROOT,
        new String[] { "a", "b" });
    csConf.setCapacityByLabel(ROOT, "x", 100);
    csConf.setCapacity(A, 25);
    csConf.setAccessibleNodeLabels(A, toSet("x"));
    csConf.setCapacityByLabel(A, "x", 50);
    csConf.setMaximumCapacityByLabel(A, "x", 50);
    csConf.setCapacity(B, 75);
    csConf.setAccessibleNodeLabels(B, toSet("x"));
    csConf.setCapacityByLabel(B, "x", 50);
    csConf.setMaximumCapacityByLabel(B, "x", 50);
    // set node -> label: h1 gets non-exclusive label x, h2 gets y
    mgr.addToCluserNodeLabels(
        ImmutableSet.of(NodeLabel.newInstance("x", false)));
    mgr.addToCluserNodeLabels(
        ImmutableSet.of(NodeLabel.newInstance("y", false)));
    mgr.addLabelsToNode(
        ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
    mgr.addLabelsToNode(
        ImmutableMap.of(NodeId.newInstance("h2", 0), toSet("y")));
    // inject node label manager
    MockRM rm = new MockRM(csConf) {
      @Override
      public RMNodeLabelsManager createNodeLabelManager() {
        return mgr;
      }
    };
    rm.getRMContext().setNodeLabelManager(mgr);
    rm.start();
    MockNM nm1 = rm.registerNode("h1:1234", 10 * GB); // label = x
    MockNM nm2 = rm.registerNode("h2:1234", 10 * GB); // label = y
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    // No node belongs to the default partition, so the default-partition
    // metrics of both leaf queues start at zero.
    LeafQueue leafQueueA = (LeafQueue) cs.getQueue("a");
    assertEquals(0 * GB, leafQueueA.getMetrics().getAvailableMB());
    assertEquals(0 * GB, leafQueueA.getMetrics().getAllocatedMB());
    LeafQueue leafQueueB = (LeafQueue) cs.getQueue("b");
    assertEquals(0 * GB, leafQueueB.getMetrics().getAvailableMB());
    assertEquals(0 * GB, leafQueueB.getMetrics().getAllocatedMB());
    // app1 -> a (AM placed on partition x)
    MockRMAppSubmissionData data =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("a")
            .withAmLabel("x")
            .build();
    RMApp app1 = MockRMAppSubmitter.submit(rm, data);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
    // app1 asks for 5 partition=x containers
    am1.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>(), "x");
    // NM1 do 50 heartbeats
    RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
    SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId());
    for (int i = 0; i < 50; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    }
    // app1 gets all resource in partition=x
    assertEquals(5, schedulerNode1.getNumContainers());
    SchedulerNodeReport reportNm1 = rm.getResourceScheduler()
        .getNodeReport(nm1.getNodeId());
    assertEquals(5 * GB, reportNm1.getUsedResource().getMemorySize());
    assertEquals(5 * GB,
        reportNm1.getAvailableResource().getMemorySize());
    SchedulerNodeReport reportNm2 = rm.getResourceScheduler()
        .getNodeReport(nm2.getNodeId());
    assertEquals(0 * GB, reportNm2.getUsedResource().getMemorySize());
    assertEquals(10 * GB,
        reportNm2.getAvailableResource().getMemorySize());
    // Allocations happened on partition x only, so the default-partition
    // queue metrics are still zero everywhere, including root.
    assertEquals(0 * GB, leafQueueA.getMetrics().getAvailableMB());
    assertEquals(0 * GB, leafQueueA.getMetrics().getAllocatedMB());
    assertEquals(0 * GB, leafQueueB.getMetrics().getAvailableMB());
    assertEquals(0 * GB, leafQueueB.getMetrics().getAllocatedMB());
    CSQueue rootQueue = cs.getRootQueue();
    assertEquals(0 * GB, rootQueue.getMetrics().getAvailableMB()
        + rootQueue.getMetrics().getAllocatedMB());
    // Kill all apps in queue a; once the app is fully removed from the
    // scheduler the AM resource usage metrics must drop back to zero.
    cs.killAllAppsInQueue("a");
    rm.waitForState(app1.getApplicationId(), RMAppState.KILLED);
    rm.waitForAppRemovedFromScheduler(app1.getApplicationId());
    assertEquals(0 * GB, leafQueueA.getMetrics().getUsedAMResourceMB());
    assertEquals(0, leafQueueA.getMetrics().getUsedAMResourceVCores());
    rm.stop();
  }
  @Test
  public void testQueueMetricsWithLabelsOnDefaultLabelNode() throws Exception {
    /**
     * Test case: have a following queue structure:
     *
     * <pre>
     *       root
     *      /    \
     *     a      b
     *    (x)    (x)
     * </pre>
     *
     * a/b can access x, both of them has max-capacity-on-x = 50.
     *
     * One node carries label x and one node is unlabeled (default partition).
     * With user metrics enabled, this verifies that queue metrics, per-user
     * metrics and per-partition metrics are all tracked independently and
     * consistently as containers land on either partition.
     */
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(
        this.conf);
    // Define top-level queues
    csConf.setQueues(ROOT,
        new String[] { "a", "b" });
    csConf.setCapacityByLabel(ROOT, "x", 100);
    csConf.setCapacity(A, 25);
    csConf.setAccessibleNodeLabels(A, toSet("x"));
    csConf.setCapacityByLabel(A, "x", 50);
    csConf.setMaximumCapacityByLabel(A, "x", 50);
    csConf.setCapacity(B, 75);
    csConf.setAccessibleNodeLabels(B, toSet("x"));
    csConf.setCapacityByLabel(B, "x", 50);
    csConf.setMaximumCapacityByLabel(B, "x", 50);
    // Needed so that the per-user metric sources queried below are registered.
    csConf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
    // set node -> label
    mgr.addToCluserNodeLabels(
        ImmutableSet.of(NodeLabel.newInstance("x", false)));
    mgr.addLabelsToNode(
        ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
    // inject node label manager
    MockRM rm1 = new MockRM(csConf) {
      @Override
      public RMNodeLabelsManager createNodeLabelManager() {
        return mgr;
      }
    };
    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB); // label = x
    MockNM nm2 = rm1.registerNode("h2:1234", 10 * GB); // label = <no_label>
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
    SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId());
    RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
    SchedulerNode schedulerNode2 = cs.getSchedulerNode(nm2.getNodeId());
    for (int i = 0; i < 50; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    }
    for (int i = 0; i < 50; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    double delta = 0.0001;
    // Initial availability: queue metrics see only the 10GB default-partition
    // node, split 25/75 between a and b.
    CSQueue leafQueue = cs.getQueue("a");
    CSQueue leafQueueB = cs.getQueue("b");
    CSQueue rootQueue = cs.getRootQueue();
    assertEquals(10 * GB, rootQueue.getMetrics().getAvailableMB(), delta);
    assertEquals(2.5 * GB, leafQueue.getMetrics().getAvailableMB(), delta);
    assertEquals(7.5 * GB, leafQueueB.getMetrics().getAvailableMB(), delta);
    // Look up the per-partition and per-queue metric sources directly from
    // the metrics system.
    MetricsSystem ms = leafQueueB.getMetrics().getMetricsSystem();
    QueueMetrics partXMetrics =
        (QueueMetrics) TestPartitionQueueMetrics.partitionSource(ms, "x");
    QueueMetrics partDefaultMetrics =
        (QueueMetrics) TestPartitionQueueMetrics.partitionSource(ms, "");
    QueueMetrics queueAMetrics =
        (QueueMetrics) TestQueueMetrics.queueSource(ms, "root.a");
    QueueMetrics queueBMetrics =
        (QueueMetrics) TestQueueMetrics.queueSource(ms, "root.b");
    QueueMetrics queueAPartDefaultMetrics =
        (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "", "root.a");
    QueueMetrics queueAPartXMetrics =
        (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "x", "root.a");
    QueueMetrics queueBPartDefaultMetrics =
        (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "", "root.b");
    QueueMetrics queueBPartXMetrics =
        (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "x", "root.b");
    QueueMetrics rootMetrics =
        (QueueMetrics) TestQueueMetrics.queueSource(ms, "root");
    // Partition metrics see the full node on each partition; per-queue
    // partition-x availability is split 50/50 per the label capacities.
    assertEquals(10 * GB, partXMetrics.getAvailableMB(), delta);
    assertEquals(10 * GB, partDefaultMetrics.getAvailableMB(), delta);
    assertEquals(2.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(7.5 * GB, queueBPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(5 * GB, queueAPartXMetrics.getAvailableMB(), delta);
    assertEquals(5 * GB, queueBPartXMetrics.getAvailableMB(), delta);
    assertEquals(10 * GB, rootMetrics.getAvailableMB(), delta);
    assertEquals(2.5 * GB, queueAMetrics.getAvailableMB(), delta);
    assertEquals(7.5 * GB, queueBMetrics.getAvailableMB(), delta);
    // app1 -> a (AM on the default-partition node nm2)
    MockRMAppSubmissionData data =
        MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("a")
            .withUnmanagedAM(false)
            .build();
    RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);
    // app1 asks for 3 partition= containers
    am1.allocate("*", 1 * GB, 3, new ArrayList<ContainerId>());
    for (int i = 0; i < 50; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    }
    for (int i = 0; i < 50; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    // app1 gets all resource in partition=x (non-exclusive)
    assertEquals(3, schedulerNode1.getNumContainers());
    SchedulerNodeReport reportNm1 = rm1.getResourceScheduler()
        .getNodeReport(nm1.getNodeId());
    assertEquals(3 * GB, reportNm1.getUsedResource().getMemorySize());
    assertEquals(7 * GB,
        reportNm1.getAvailableResource().getMemorySize());
    SchedulerNodeReport reportNm2 = rm1.getResourceScheduler()
        .getNodeReport(nm2.getNodeId());
    assertEquals(1 * GB, reportNm2.getUsedResource().getMemorySize());
    assertEquals(9 * GB,
        reportNm2.getAvailableResource().getMemorySize());
    // Per-partition/per-queue metrics after the first round of allocations:
    // 3GB used on x, 1GB (the AM) used on the default partition.
    assertEquals(7 * GB, partXMetrics.getAvailableMB(), delta);
    assertEquals(9 * GB, partDefaultMetrics.getAvailableMB(), delta);
    assertEquals(1.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(1 * GB, queueAPartDefaultMetrics.getAllocatedMB(), delta);
    assertEquals(7.5 * GB, queueBPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAPartXMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, queueAPartXMetrics.getAllocatedMB(), delta);
    assertEquals(5 * GB, queueBPartXMetrics.getAvailableMB(), delta);
    assertEquals(1 * GB, queueAMetrics.getAllocatedMB(), delta);
    assertEquals(1.5 * GB, queueAMetrics.getAvailableMB(), delta);
    assertEquals(0 * GB, queueBMetrics.getAllocatedMB(), delta);
    assertEquals(7.5 * GB, queueBMetrics.getAvailableMB(), delta);
    // Nothing is pending once the requests are satisfied.
    assertEquals(0 * GB, queueAMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueAPartDefaultMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueAPartXMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueBPartDefaultMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueBPartXMetrics.getPendingMB(), delta);
    assertEquals(1.5 * GB, leafQueue.getMetrics().getAvailableMB(), delta);
    assertEquals(1 * GB, leafQueue.getMetrics().getAllocatedMB());
    assertEquals(3 * GB, partXMetrics.getAllocatedMB(), delta);
    assertEquals(1 * GB, partDefaultMetrics.getAllocatedMB(), delta);
    // Per-user metric sources (registered because ENABLE_USER_METRICS is on).
    QueueMetrics partDefaultQueueAUserMetrics =
        (QueueMetrics) TestPartitionQueueMetrics.userSource(ms, "", "user",
            "root.a");
    QueueMetrics partXQueueAUserMetrics =
        (QueueMetrics) TestPartitionQueueMetrics.userSource(ms, "x", "user",
            "root.a");
    QueueMetrics queueAUserMetrics =
        (QueueMetrics) TestQueueMetrics.userSource(ms, "root.a", "user");
    assertEquals(2 * GB, queueAUserMetrics.getAvailableMB(), delta);
    assertEquals(1 * GB, queueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(1.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(1 * GB, queueAPartDefaultMetrics.getAllocatedMB(), delta);
    assertEquals(2 * GB, queueAPartXMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, queueAPartXMetrics.getAllocatedMB(), delta);
    assertEquals(2 * GB, partDefaultQueueAUserMetrics.getAvailableMB(), delta);
    assertEquals(1 * GB, partDefaultQueueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(2 * GB, partXQueueAUserMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, partXQueueAUserMetrics.getAllocatedMB(), delta);
    // Ask for 5 more default-partition containers but allow only a single
    // nm2 heartbeat, so only part of the request can be satisfied for now.
    am1.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>());
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    assertEquals(2, schedulerNode2.getNumContainers());
    assertEquals(3, schedulerNode1.getNumContainers());
    // 3GB is used from label x quota. 2GB used from default label.
    // So 0.5 GB is remaining from default label.
    assertEquals(5 * GB / 10, leafQueue.getMetrics().getAvailableMB());
    assertEquals(2 * GB, leafQueue.getMetrics().getAllocatedMB());
    // The total memory tracked by QueueMetrics is 10GB
    // for the default partition
    assertEquals(10*GB, rootQueue.getMetrics().getAvailableMB() +
        rootQueue.getMetrics().getAllocatedMB());
    assertEquals(0.5 * GB, queueAMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAMetrics.getAllocatedMB());
    assertEquals(0.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAPartDefaultMetrics.getAllocatedMB(), delta);
    assertEquals(2 * GB, queueAPartXMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, queueAPartXMetrics.getAllocatedMB(), delta);
    assertEquals(1 * GB, partDefaultQueueAUserMetrics.getAvailableMB(),
        delta);
    assertEquals(2 * GB, partDefaultQueueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(2 * GB, partXQueueAUserMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, partXQueueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(1 * GB, queueAUserMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(7 * GB, partXMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, partXMetrics.getAllocatedMB(), delta);
    assertEquals(8 * GB, partDefaultMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, partDefaultMetrics.getAllocatedMB(), delta);
    // Pending Resources when containers are waiting on "default" partition
    assertEquals(4 * GB, queueAMetrics.getPendingMB(), delta);
    assertEquals(4 * GB, queueAPartDefaultMetrics.getPendingMB(), delta);
    assertEquals(4 * GB, partDefaultQueueAUserMetrics.getPendingMB(),
        delta);
    assertEquals(4 * GB, queueAUserMetrics.getPendingMB(), delta);
    assertEquals(4 * GB, partDefaultMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueAPartXMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, partXQueueAUserMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, partXMetrics.getPendingMB(), delta);
    // Let both nodes heartbeat so the remaining pending request is served
    // (the overflow lands on partition x via non-exclusive allocation).
    for (int i = 0; i < 50; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    }
    for (int i = 0; i < 50; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    assertEquals(0.5 * GB, queueAMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAMetrics.getAllocatedMB());
    assertEquals(0.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAPartDefaultMetrics.getAllocatedMB(), delta);
    assertEquals(0 * GB, queueAPartXMetrics.getAvailableMB(), delta);
    assertEquals(7 * GB, queueAPartXMetrics.getAllocatedMB(), delta);
    assertEquals(1 * GB, partDefaultQueueAUserMetrics.getAvailableMB(),
        delta);
    assertEquals(2 * GB, partDefaultQueueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(0 * GB, partXQueueAUserMetrics.getAvailableMB(), delta);
    assertEquals(7 * GB, partXQueueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(1 * GB, queueAUserMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(3 * GB, partXMetrics.getAvailableMB(), delta);
    assertEquals(7 * GB, partXMetrics.getAllocatedMB(), delta);
    assertEquals(8 * GB, partDefaultMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, partDefaultMetrics.getAllocatedMB(), delta);
    // Pending Resources after containers has been assigned on "x" partition
    assertEquals(0 * GB, queueAMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueAPartDefaultMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, partDefaultQueueAUserMetrics.getPendingMB(),
        delta);
    assertEquals(0 * GB, queueAUserMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, partDefaultMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueAPartXMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, partXQueueAUserMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, partXMetrics.getPendingMB(), delta);
    // Kill the app and let the schedulers settle; availability returns to
    // the initial values and release counters match allocation counters.
    rm1.killApp(app1.getApplicationId());
    rm1.waitForState(app1.getApplicationId(), RMAppState.KILLED);
    for (int i = 0; i < 50; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    }
    for (int i = 0; i < 50; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    assertEquals(10 * GB, rootQueue.getMetrics().getAvailableMB(), delta);
    assertEquals(2.5 * GB, leafQueue.getMetrics().getAvailableMB(), delta);
    assertEquals(7.5 * GB, leafQueueB.getMetrics().getAvailableMB(), delta);
    assertEquals(2, queueAMetrics.getAggregateAllocatedContainers());
    assertEquals(2, queueAMetrics.getAggegatedReleasedContainers());
    assertEquals(2, queueAPartDefaultMetrics.getAggregateAllocatedContainers());
    assertEquals(2, queueAPartDefaultMetrics.getAggegatedReleasedContainers());
    assertEquals(7, partXMetrics.getAggregateAllocatedContainers());
    assertEquals(2, partDefaultMetrics.getAggregateAllocatedContainers());
    assertEquals(7, queueAPartXMetrics.getAggregateAllocatedContainers());
    assertEquals(7, queueAPartXMetrics.getAggegatedReleasedContainers());
    assertEquals(2.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(5 * GB, queueAPartXMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, queueAUserMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, partDefaultQueueAUserMetrics.getAvailableMB(), delta);
    assertEquals(5 * GB, partXQueueAUserMetrics.getAvailableMB(), delta);
    rm1.close();
  }
@Test
public void testQueueMetricsWithMixedLabels() throws Exception {
// There is only one queue which can access both default label and label x.
// There are two nodes of 10GB label x and 12GB no label.
// The test is to make sure that the queue metrics is only tracking the
// allocations and availability from default partition
CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(
this.conf);
// Define top-level queues
csConf.setQueues(ROOT,
new String[] {"a"});
csConf.setCapacityByLabel(ROOT, "x", 100);
csConf.setCapacity(A, 100);
csConf.setAccessibleNodeLabels(A, toSet("x"));
csConf.setCapacityByLabel(A, "x", 100);
csConf.setMaximumCapacityByLabel(A, "x", 100);
// set node -> label
// label x exclusivity is set to true
mgr.addToCluserNodeLabels(
ImmutableSet.of(NodeLabel.newInstance("x", true)));
mgr.addLabelsToNode(
ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
// inject node label manager
MockRM rm1 = new MockRM(csConf) {
@Override
public RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB); // label = x
MockNM nm2 = rm1.registerNode("h2:1234", 12 * GB); // label = <no_label>
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
LeafQueue leafQueueA = (LeafQueue) cs.getQueue("a");
assertEquals(12 * GB, leafQueueA.getMetrics().getAvailableMB());
assertEquals(0 * GB, leafQueueA.getMetrics().getAllocatedMB());
// app1 -> a
RMApp app1 = MockRMAppSubmitter.submit(rm1,
MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a")
.withAmLabel("x")
.build());
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
// app1 asks for 5 partition=x containers
am1.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>(), "x");
// NM1 do 50 heartbeats
RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId());
for (int i = 0; i < 50; i++) {
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
}
// app1 gets all resource in partition=x
assertEquals(6, schedulerNode1.getNumContainers());
SchedulerNodeReport reportNm1 = rm1.getResourceScheduler()
.getNodeReport(nm1.getNodeId());
assertEquals(6 * GB, reportNm1.getUsedResource().getMemorySize());
assertEquals(4 * GB,
reportNm1.getAvailableResource().getMemorySize());
SchedulerNodeReport reportNm2 = rm1.getResourceScheduler()
.getNodeReport(nm2.getNodeId());
assertEquals(0 * GB, reportNm2.getUsedResource().getMemorySize());
assertEquals(12 * GB,
reportNm2.getAvailableResource().getMemorySize());
assertEquals(12 * GB, leafQueueA.getMetrics().getAvailableMB());
assertEquals(0 * GB, leafQueueA.getMetrics().getAllocatedMB());
// app2 -> a
RMApp app2 = MockRMAppSubmitter.submit(rm1,
MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a")
.withAmLabel("")
.build());
MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);
// app2 asks for 5 partition= containers
am2.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>(), "");
// NM2 do 50 heartbeats
RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
SchedulerNode schedulerNode2 = cs.getSchedulerNode(nm2.getNodeId());
for (int i = 0; i < 50; i++) {
cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
}
// app1 gets all resource in partition=x
assertEquals(6, schedulerNode2.getNumContainers());
reportNm1 = rm1.getResourceScheduler().getNodeReport(nm1.getNodeId());
assertEquals(6 * GB, reportNm1.getUsedResource().getMemorySize());
assertEquals(4 * GB,
reportNm1.getAvailableResource().getMemorySize());
reportNm2 = rm1.getResourceScheduler().getNodeReport(nm2.getNodeId());
assertEquals(6 * GB, reportNm2.getUsedResource().getMemorySize());
assertEquals(6 * GB,
reportNm2.getAvailableResource().getMemorySize());
assertEquals(6 * GB, leafQueueA.getMetrics().getAvailableMB());
assertEquals(6 * GB, leafQueueA.getMetrics().getAllocatedMB());
// The total memory tracked by QueueMetrics is 12GB
// for the default partition
CSQueue rootQueue = cs.getRootQueue();
assertEquals(12 * GB, rootQueue.getMetrics().getAvailableMB()
+ rootQueue.getMetrics().getAllocatedMB());
// Kill all apps in queue a
cs.killAllAppsInQueue("a");
rm1.waitForState(app1.getApplicationId(), RMAppState.KILLED);
rm1.waitForAppRemovedFromScheduler(app1.getApplicationId());
assertEquals(0 * GB, leafQueueA.getMetrics().getUsedAMResourceMB());
assertEquals(0, leafQueueA.getMetrics().getUsedAMResourceVCores());
rm1.close();
}
@Test
public void testTwoLevelQueueMetricsWithLabels() throws Exception {
CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(
this.conf);
// Define top-level queues
csConf.setQueues(ROOT,
new String[] {"a"});
csConf.setCapacityByLabel(ROOT, "x", 100);
csConf.setCapacity(A, 100);
csConf.setAccessibleNodeLabels(A, toSet("x"));
csConf.setCapacityByLabel(A, "x", 100);
csConf.setMaximumCapacityByLabel(A, "x", 100);
csConf.setQueues(A, new String[] {"a1"});
csConf.setCapacity(A1, 100);
csConf.setAccessibleNodeLabels(A1, toSet("x"));
csConf.setCapacityByLabel(A1, "x", 100);
csConf.setMaximumCapacityByLabel(A1, "x", 100);
// set node -> label
// label x exclusivity is set to true
mgr.addToCluserNodeLabels(
ImmutableSet.of(NodeLabel.newInstance("x", true)));
mgr.addLabelsToNode(
ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
// inject node label manager
MockRM rm1 = new MockRM(csConf) {
@Override
public RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB); // label = x
MockNM nm2 = rm1.registerNode("h2:1234", 12 * GB); // label = <no_label>
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
ParentQueue leafQueueA = (ParentQueue) cs.getQueue("a");
LeafQueue leafQueueA1 = (LeafQueue) cs.getQueue("a1");
assertEquals(12 * GB, leafQueueA1.getMetrics().getAvailableMB());
assertEquals(0 * GB, leafQueueA1.getMetrics().getAllocatedMB());
MetricsSystem ms = leafQueueA1.getMetrics().getMetricsSystem();
QueueMetrics partXMetrics =
(QueueMetrics) TestPartitionQueueMetrics.partitionSource(ms, "x");
QueueMetrics partDefaultMetrics =
(QueueMetrics) TestPartitionQueueMetrics.partitionSource(ms, "");
QueueMetrics queueAPartDefaultMetrics =
(QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "", "root.a");
QueueMetrics queueAPartXMetrics =
(QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "x", "root.a");
QueueMetrics queueA1PartDefaultMetrics =
(QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "", "root.a.a1");
QueueMetrics queueA1PartXMetrics =
(QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "x", "root.a.a1");
QueueMetrics queueRootPartDefaultMetrics =
(QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "", "root");
QueueMetrics queueRootPartXMetrics =
(QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "x", "root");
QueueMetrics queueAMetrics =
(QueueMetrics) TestQueueMetrics.queueSource(ms, "root.a");
QueueMetrics queueA1Metrics =
(QueueMetrics) TestQueueMetrics.queueSource(ms, "root.a.a1");
QueueMetrics queueRootMetrics =
(QueueMetrics) TestQueueMetrics.queueSource(ms, "root");
assertEquals(12 * GB, queueAMetrics.getAvailableMB());
assertEquals(12 * GB, queueA1Metrics.getAvailableMB());
assertEquals(12 * GB, queueRootMetrics.getAvailableMB());
assertEquals(12 * GB, leafQueueA.getMetrics().getAvailableMB());
assertEquals(10 * GB, queueA1PartXMetrics.getAvailableMB());
assertEquals(10 * GB, queueAPartXMetrics.getAvailableMB());
assertEquals(10 * GB, queueRootPartXMetrics.getAvailableMB());
assertEquals(12 * GB, queueA1PartDefaultMetrics.getAvailableMB());
assertEquals(12 * GB, queueAPartDefaultMetrics.getAvailableMB());
assertEquals(12 * GB, queueRootPartDefaultMetrics.getAvailableMB());
assertEquals(10 * GB, partXMetrics.getAvailableMB());
assertEquals(12 * GB, partDefaultMetrics.getAvailableMB());
// app1 -> a
RMApp app1 = MockRMAppSubmitter.submit(rm1,
MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withAmLabel("x")
.build());
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
// app1 asks for 5 partition=x containers
am1.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>(), "x");
// NM1 do 50 heartbeats
RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId());
for (int i = 0; i < 50; i++) {
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
}
// app1 gets all resource in partition=x
assertEquals(6, schedulerNode1.getNumContainers());
SchedulerNodeReport reportNm1 = rm1.getResourceScheduler()
.getNodeReport(nm1.getNodeId());
assertEquals(6 * GB, reportNm1.getUsedResource().getMemorySize());
assertEquals(4 * GB, reportNm1.getAvailableResource().getMemorySize());
SchedulerNodeReport reportNm2 = rm1.getResourceScheduler()
.getNodeReport(nm2.getNodeId());
assertEquals(0 * GB, reportNm2.getUsedResource().getMemorySize());
assertEquals(12 * GB,
reportNm2.getAvailableResource().getMemorySize());
assertEquals(0 * GB, queueAMetrics.getAllocatedMB());
assertEquals(0 * GB, queueA1Metrics.getAllocatedMB());
assertEquals(0 * GB, queueRootMetrics.getAllocatedMB());
assertEquals(0 * GB, leafQueueA.getMetrics().getAllocatedMB());
assertEquals(0 * GB, leafQueueA.getMetrics().getAllocatedMB());
assertEquals(6 * GB, queueA1PartXMetrics.getAllocatedMB());
assertEquals(6 * GB, queueAPartXMetrics.getAllocatedMB());
assertEquals(6 * GB, queueRootPartXMetrics.getAllocatedMB());
assertEquals(0 * GB, queueA1PartDefaultMetrics.getAllocatedMB());
assertEquals(0 * GB, queueAPartDefaultMetrics.getAllocatedMB());
assertEquals(0 * GB, queueRootPartDefaultMetrics.getAllocatedMB());
assertEquals(6 * GB, partXMetrics.getAllocatedMB());
assertEquals(0 * GB, partDefaultMetrics.getAllocatedMB());
// app2 -> a
RMApp app2 = MockRMAppSubmitter.submit(rm1,
MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withAmLabel("")
.build());
MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);
// app2 asks for 5 partition= containers
am2.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>(), "");
// NM2 do 50 heartbeats
RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
SchedulerNode schedulerNode2 = cs.getSchedulerNode(nm2.getNodeId());
for (int i = 0; i < 50; i++) {
cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
}
// app1 gets all resource in partition=x
assertEquals(6, schedulerNode2.getNumContainers());
reportNm1 = rm1.getResourceScheduler().getNodeReport(nm1.getNodeId());
assertEquals(6 * GB, reportNm1.getUsedResource().getMemorySize());
assertEquals(4 * GB,
reportNm1.getAvailableResource().getMemorySize());
reportNm2 = rm1.getResourceScheduler().getNodeReport(nm2.getNodeId());
assertEquals(6 * GB, reportNm2.getUsedResource().getMemorySize());
assertEquals(6 * GB,
reportNm2.getAvailableResource().getMemorySize());
assertEquals(6 * GB, leafQueueA.getMetrics().getAvailableMB());
assertEquals(6 * GB, leafQueueA.getMetrics().getAllocatedMB());
// The total memory tracked by QueueMetrics is 12GB
// for the default partition
CSQueue rootQueue = cs.getRootQueue();
assertEquals(12 * GB, rootQueue.getMetrics().getAvailableMB()
+ rootQueue.getMetrics().getAllocatedMB());
assertEquals(6 * GB, queueAMetrics.getAllocatedMB());
assertEquals(6 * GB, queueA1Metrics.getAllocatedMB());
assertEquals(6 * GB, queueRootMetrics.getAllocatedMB());
assertEquals(6 * GB, queueA1PartXMetrics.getAllocatedMB());
assertEquals(6 * GB, queueAPartXMetrics.getAllocatedMB());
assertEquals(6 * GB, queueRootPartXMetrics.getAllocatedMB());
assertEquals(6 * GB, queueA1PartDefaultMetrics.getAllocatedMB());
assertEquals(6 * GB, queueAPartDefaultMetrics.getAllocatedMB());
assertEquals(6 * GB, queueRootPartDefaultMetrics.getAllocatedMB());
assertEquals(6 * GB, partXMetrics.getAllocatedMB());
assertEquals(6 * GB, partDefaultMetrics.getAllocatedMB());
// Kill all apps in queue a
cs.killAllAppsInQueue("a1");
rm1.waitForState(app1.getApplicationId(), RMAppState.KILLED);
rm1.waitForAppRemovedFromScheduler(app1.getApplicationId());
assertEquals(0 * GB, leafQueueA.getMetrics().getUsedAMResourceMB());
assertEquals(0, leafQueueA.getMetrics().getUsedAMResourceVCores());
rm1.close();
}
@Test
public void testQueueMetricsWithLabelsDisableElasticity() throws Exception {
/**
* Test case: have a following queue structure:
*
* <pre>
*
* root
* / \
* a b
* (x) (x)
* / \
* a1 a2
* (x) (x)
* </pre>
*
* a/b can access x, both of them has max-capacity-on-x = 50
*
* When doing non-exclusive allocation, app in a (or b) can use 100% of x
* resource.
*/
CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(
this.conf);
// Define top-level queues
csConf.setQueues(ROOT,
new String[] { "a", "b" });
csConf.setCapacityByLabel(ROOT, "x", 100);
csConf.setCapacity(A, 50);
csConf.setMaximumCapacity(A, 100);
csConf.setAccessibleNodeLabels(A, toSet("x"));
csConf.setCapacityByLabel(A, "x", 50);
csConf.setMaximumCapacityByLabel(A, "x", 100);
csConf.setCapacity(B, 50);
csConf.setMaximumCapacity(B, 100);
csConf.setAccessibleNodeLabels(B, toSet("x"));
csConf.setCapacityByLabel(B, "x", 50);
csConf.setMaximumCapacityByLabel(B, "x", 100);
// Define 2nd-level queues
csConf.setQueues(A, new String[] {"a1", "a2"});
csConf.setCapacity(A1, 20);
csConf.setMaximumCapacity(A1, 60);
csConf.setAccessibleNodeLabels(A1, toSet("x"));
csConf.setCapacityByLabel(A1, "x", 60);
csConf.setMaximumCapacityByLabel(A1, "x", 30);
csConf.setCapacity(A2, 80);
csConf.setMaximumCapacity(A2, 40);
csConf.setAccessibleNodeLabels(A2, toSet("x"));
csConf.setCapacityByLabel(A2, "x", 40);
csConf.setMaximumCapacityByLabel(A2, "x", 20);
// set node -> label
mgr.addToCluserNodeLabels(
ImmutableSet.of(NodeLabel.newInstance("x", false)));
mgr.addLabelsToNode(
ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
// inject node label manager
MockRM rm1 = new MockRM(csConf) {
@Override
public RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB); // label = x
// app1 -> a1
MockRMAppSubmissionData data3 =
MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withAmLabel("x")
.build();
RMApp app1 = MockRMAppSubmitter.submit(rm1, data3);
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
// app1 asks for 6 partition=x containers
am1.allocate("*", 1 * GB, 6, new ArrayList<ContainerId>(), "x");
// NM1 do 50 heartbeats
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
doNMHeartbeat(rm1, nm1.getNodeId(), 50);
checkNumOfContainersInAnAppOnGivenNode(6, nm1.getNodeId(),
cs.getApplicationAttempt(am1.getApplicationAttemptId()));
SchedulerNodeReport reportNm1 = rm1.getResourceScheduler()
.getNodeReport(nm1.getNodeId());
assertEquals(6 * GB, reportNm1.getUsedResource().getMemorySize());
assertEquals(14 * GB,
reportNm1.getAvailableResource().getMemorySize());
// Try to launch app2 in a2, asked 2GB, should success
// app2 -> a2
MockRMAppSubmissionData data2 =
MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a2")
.withAmLabel("x")
.build();
RMApp app2 = MockRMAppSubmitter.submit(rm1, data2);
MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
// app2 asks for 4 partition=x containers
am2.allocate("*", 1 * GB, 4, new ArrayList<ContainerId>(), "x");
// NM1 do 50 heartbeats
doNMHeartbeat(rm1, nm1.getNodeId(), 50);
checkNumOfContainersInAnAppOnGivenNode(4, nm1.getNodeId(),
cs.getApplicationAttempt(am2.getApplicationAttemptId()));
reportNm1 = rm1.getResourceScheduler()
.getNodeReport(nm1.getNodeId());
assertEquals(10 * GB, reportNm1.getUsedResource().getMemorySize());
assertEquals(10 * GB,
reportNm1.getAvailableResource().getMemorySize());
// Kill all apps in queue a2
cs.killAllAppsInQueue("a2");
rm1.waitForState(app2.getApplicationId(), RMAppState.KILLED);
rm1.waitForAppRemovedFromScheduler(app2.getApplicationId());
// Try to launch app3 in a2, asked 6GB, should fail
// app3 -> a2
MockRMAppSubmissionData data1 =
MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a2")
.withAmLabel("x")
.build();
RMApp app3 = MockRMAppSubmitter.submit(rm1, data1);
MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1);
am3.allocate("*", 1 * GB, 6, new ArrayList<ContainerId>(), "x");
// NM1 do 50 heartbeats
doNMHeartbeat(rm1, nm1.getNodeId(), 50);
// app3 cannot preempt more resources restricted by disable elasticity
checkNumOfContainersInAnAppOnGivenNode(4, nm1.getNodeId(),
cs.getApplicationAttempt(am3.getApplicationAttemptId()));
assertEquals(10 * GB, reportNm1.getUsedResource().getMemorySize());
assertEquals(10 * GB,
reportNm1.getAvailableResource().getMemorySize());
// Kill all apps in queue a1
cs.killAllAppsInQueue("a1");
rm1.waitForState(app1.getApplicationId(), RMAppState.KILLED);
rm1.waitForAppRemovedFromScheduler(app1.getApplicationId());
// app4 -> a1, try to allocate more than 6GB resource, should fail
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withAmLabel("x")
.build();
RMApp app4 = MockRMAppSubmitter.submit(rm1, data);
MockAM am4 = MockRM.launchAndRegisterAM(app4, rm1, nm1);
// app3 asks for 7 partition=x containers
am4.allocate("*", 1 * GB, 7, new ArrayList<ContainerId>(), "x");
// NM1 do 50 heartbeats
doNMHeartbeat(rm1, nm1.getNodeId(), 50);
// app4 should only gets 6GB resource in partition=x
// since elasticity is disabled
checkNumOfContainersInAnAppOnGivenNode(6, nm1.getNodeId(),
cs.getApplicationAttempt(am4.getApplicationAttemptId()));
assertEquals(10 * GB, reportNm1.getUsedResource().getMemorySize());
assertEquals(10 * GB,
reportNm1.getAvailableResource().getMemorySize());
rm1.close();
}
}
| TestNodeLabelContainerAllocation |
java | google__guava | guava/src/com/google/common/collect/Sets.java | {
"start": 73691,
"end": 79685
} | class ____<E extends @Nullable Object>
extends ForwardingSortedSet<E> implements NavigableSet<E>, Serializable {
private final NavigableSet<E> delegate;
private final SortedSet<E> unmodifiableDelegate;
UnmodifiableNavigableSet(NavigableSet<E> delegate) {
this.delegate = checkNotNull(delegate);
this.unmodifiableDelegate = Collections.unmodifiableSortedSet(delegate);
}
@Override
protected SortedSet<E> delegate() {
return unmodifiableDelegate;
}
// default methods not forwarded by ForwardingSortedSet
@Override
public boolean removeIf(java.util.function.Predicate<? super E> filter) {
throw new UnsupportedOperationException();
}
@Override
public Stream<E> stream() {
return delegate.stream();
}
@Override
public Stream<E> parallelStream() {
return delegate.parallelStream();
}
@Override
public void forEach(Consumer<? super E> action) {
delegate.forEach(action);
}
@Override
public @Nullable E lower(@ParametricNullness E e) {
return delegate.lower(e);
}
@Override
public @Nullable E floor(@ParametricNullness E e) {
return delegate.floor(e);
}
@Override
public @Nullable E ceiling(@ParametricNullness E e) {
return delegate.ceiling(e);
}
@Override
public @Nullable E higher(@ParametricNullness E e) {
return delegate.higher(e);
}
@Override
public @Nullable E pollFirst() {
throw new UnsupportedOperationException();
}
@Override
public @Nullable E pollLast() {
throw new UnsupportedOperationException();
}
@LazyInit private transient @Nullable UnmodifiableNavigableSet<E> descendingSet;
@Override
public NavigableSet<E> descendingSet() {
UnmodifiableNavigableSet<E> result = descendingSet;
if (result == null) {
result = descendingSet = new UnmodifiableNavigableSet<>(delegate.descendingSet());
result.descendingSet = this;
}
return result;
}
@Override
public Iterator<E> descendingIterator() {
return Iterators.unmodifiableIterator(delegate.descendingIterator());
}
@Override
public NavigableSet<E> subSet(
@ParametricNullness E fromElement,
boolean fromInclusive,
@ParametricNullness E toElement,
boolean toInclusive) {
return unmodifiableNavigableSet(
delegate.subSet(fromElement, fromInclusive, toElement, toInclusive));
}
@Override
public NavigableSet<E> headSet(@ParametricNullness E toElement, boolean inclusive) {
return unmodifiableNavigableSet(delegate.headSet(toElement, inclusive));
}
@Override
public NavigableSet<E> tailSet(@ParametricNullness E fromElement, boolean inclusive) {
return unmodifiableNavigableSet(delegate.tailSet(fromElement, inclusive));
}
@GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0;
}
/**
* Returns a synchronized (thread-safe) navigable set backed by the specified navigable set. In
* order to guarantee serial access, it is critical that <b>all</b> access to the backing
* navigable set is accomplished through the returned navigable set (or its views).
*
* <p>It is imperative that the user manually synchronize on the returned sorted set when
* iterating over it or any of its {@code descendingSet}, {@code subSet}, {@code headSet}, or
* {@code tailSet} views.
*
* {@snippet :
* NavigableSet<E> set = synchronizedNavigableSet(new TreeSet<E>());
* ...
* synchronized (set) {
* // Must be in the synchronized block
* Iterator<E> it = set.iterator();
* while (it.hasNext()) {
* foo(it.next());
* }
* }
* }
*
* <p>or:
*
* {@snippet :
* NavigableSet<E> set = synchronizedNavigableSet(new TreeSet<E>());
* NavigableSet<E> set2 = set.descendingSet().headSet(foo);
* ...
* synchronized (set) { // Note: set, not set2!!!
* // Must be in the synchronized block
* Iterator<E> it = set2.descendingIterator();
* while (it.hasNext()) {
* foo(it.next());
* }
* }
* }
*
* <p>Failure to follow this advice may result in non-deterministic behavior.
*
* <p>The returned navigable set will be serializable if the specified navigable set is
* serializable.
*
* <p><b>Java 8+ users and later:</b> Prefer {@link Collections#synchronizedNavigableSet}.
*
* @param navigableSet the navigable set to be "wrapped" in a synchronized navigable set.
* @return a synchronized view of the specified navigable set.
* @since 13.0
*/
@GwtIncompatible // NavigableSet
@J2ktIncompatible // Synchronized
public static <E extends @Nullable Object> NavigableSet<E> synchronizedNavigableSet(
NavigableSet<E> navigableSet) {
return Synchronized.navigableSet(navigableSet);
}
/** Remove each element in an iterable from a set. */
static boolean removeAllImpl(Set<?> set, Iterator<?> iterator) {
boolean changed = false;
while (iterator.hasNext()) {
changed |= set.remove(iterator.next());
}
return changed;
}
static boolean removeAllImpl(Set<?> set, Collection<?> collection) {
checkNotNull(collection); // for GWT
if (collection instanceof Multiset) {
collection = ((Multiset<?>) collection).elementSet();
}
/*
* AbstractSet.removeAll(List) has quadratic behavior if the list size
* is just more than the set's size. We augment the test by
* assuming that sets have fast contains() performance, and other
* collections don't. See
* https://github.com/google/guava/issues/1013
*/
if (collection instanceof Set && collection.size() > set.size()) {
return Iterators.removeAll(set.iterator(), collection);
} else {
return removeAllImpl(set, collection.iterator());
}
}
@GwtIncompatible // NavigableSet
static | UnmodifiableNavigableSet |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/voyageai/embeddings/VoyageAIEmbeddingsServiceSettings.java | {
"start": 1732,
"end": 10371
} | class ____ extends FilteredXContentObject implements ServiceSettings {
public static final String NAME = "voyageai_embeddings_service_settings";
static final String DIMENSIONS_SET_BY_USER = "dimensions_set_by_user";
public static final VoyageAIEmbeddingsServiceSettings EMPTY_SETTINGS = new VoyageAIEmbeddingsServiceSettings(
null,
null,
null,
null,
null,
false
);
public static final String EMBEDDING_TYPE = "embedding_type";
private static final TransportVersion VOYAGE_AI_INTEGRATION_ADDED = TransportVersion.fromName("voyage_ai_integration_added");
public static VoyageAIEmbeddingsServiceSettings fromMap(Map<String, Object> map, ConfigurationParseContext context) {
return switch (context) {
case REQUEST -> fromRequestMap(map, context);
case PERSISTENT -> fromPersistentMap(map, context);
};
}
private static VoyageAIEmbeddingsServiceSettings fromRequestMap(Map<String, Object> map, ConfigurationParseContext context) {
ValidationException validationException = new ValidationException();
var commonServiceSettings = VoyageAIServiceSettings.fromMap(map, context);
VoyageAIEmbeddingType embeddingTypes = parseEmbeddingType(map, context, validationException);
SimilarityMeasure similarity = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException);
Integer dims = removeAsType(map, DIMENSIONS, Integer.class);
Integer maxInputTokens = removeAsType(map, MAX_INPUT_TOKENS, Integer.class);
if (validationException.validationErrors().isEmpty() == false) {
throw validationException;
}
return new VoyageAIEmbeddingsServiceSettings(commonServiceSettings, embeddingTypes, similarity, dims, maxInputTokens, dims != null);
}
private static VoyageAIEmbeddingsServiceSettings fromPersistentMap(Map<String, Object> map, ConfigurationParseContext context) {
ValidationException validationException = new ValidationException();
var commonServiceSettings = VoyageAIServiceSettings.fromMap(map, context);
VoyageAIEmbeddingType embeddingTypes = parseEmbeddingType(map, context, validationException);
SimilarityMeasure similarity = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException);
Integer dims = removeAsType(map, DIMENSIONS, Integer.class);
Integer maxInputTokens = removeAsType(map, MAX_INPUT_TOKENS, Integer.class);
Boolean dimensionsSetByUser = removeAsType(map, DIMENSIONS_SET_BY_USER, Boolean.class);
if (dimensionsSetByUser == null) {
dimensionsSetByUser = Boolean.FALSE;
}
if (validationException.validationErrors().isEmpty() == false) {
throw validationException;
}
return new VoyageAIEmbeddingsServiceSettings(
commonServiceSettings,
embeddingTypes,
similarity,
dims,
maxInputTokens,
dimensionsSetByUser
);
}
static VoyageAIEmbeddingType parseEmbeddingType(
Map<String, Object> map,
ConfigurationParseContext context,
ValidationException validationException
) {
return switch (context) {
case REQUEST, PERSISTENT -> Objects.requireNonNullElse(
extractOptionalEnum(
map,
EMBEDDING_TYPE,
ModelConfigurations.SERVICE_SETTINGS,
VoyageAIEmbeddingType::fromString,
EnumSet.allOf(VoyageAIEmbeddingType.class),
validationException
),
VoyageAIEmbeddingType.FLOAT
);
};
}
private final VoyageAIServiceSettings commonSettings;
private final VoyageAIEmbeddingType embeddingType;
private final SimilarityMeasure similarity;
private final Integer dimensions;
private final Integer maxInputTokens;
private final boolean dimensionsSetByUser;
public VoyageAIEmbeddingsServiceSettings(
VoyageAIServiceSettings commonSettings,
@Nullable VoyageAIEmbeddingType embeddingType,
@Nullable SimilarityMeasure similarity,
@Nullable Integer dimensions,
@Nullable Integer maxInputTokens,
boolean dimensionsSetByUser
) {
this.commonSettings = commonSettings;
this.similarity = similarity;
this.dimensions = dimensions;
this.maxInputTokens = maxInputTokens;
this.embeddingType = embeddingType;
this.dimensionsSetByUser = dimensionsSetByUser;
}
public VoyageAIEmbeddingsServiceSettings(StreamInput in) throws IOException {
this.commonSettings = new VoyageAIServiceSettings(in);
this.similarity = in.readOptionalEnum(SimilarityMeasure.class);
this.dimensions = in.readOptionalVInt();
this.maxInputTokens = in.readOptionalVInt();
this.embeddingType = Objects.requireNonNullElse(in.readOptionalEnum(VoyageAIEmbeddingType.class), VoyageAIEmbeddingType.FLOAT);
this.dimensionsSetByUser = in.readBoolean();
}
public VoyageAIServiceSettings getCommonSettings() {
return commonSettings;
}
@Override
public SimilarityMeasure similarity() {
return similarity;
}
@Override
public Integer dimensions() {
return dimensions;
}
public Integer maxInputTokens() {
return maxInputTokens;
}
@Override
public String modelId() {
return commonSettings.modelId();
}
public VoyageAIEmbeddingType getEmbeddingType() {
return embeddingType;
}
@Override
public DenseVectorFieldMapper.ElementType elementType() {
return embeddingType == null ? DenseVectorFieldMapper.ElementType.FLOAT : embeddingType.toElementType();
}
@Override
public Boolean dimensionsSetByUser() {
return this.dimensionsSetByUser;
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder = commonSettings.toXContentFragment(builder, params);
if (similarity != null) {
builder.field(SIMILARITY, similarity);
}
if (dimensions != null) {
builder.field(DIMENSIONS, dimensions);
}
if (maxInputTokens != null) {
builder.field(MAX_INPUT_TOKENS, maxInputTokens);
}
if (embeddingType != null) {
builder.field(EMBEDDING_TYPE, embeddingType);
}
builder.endObject();
return builder;
}
@Override
protected XContentBuilder toXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException {
commonSettings.toXContentFragmentOfExposedFields(builder, params);
return builder;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
assert false : "should never be called when supportsVersion is used";
return VOYAGE_AI_INTEGRATION_ADDED;
}
@Override
public boolean supportsVersion(TransportVersion version) {
return version.supports(VOYAGE_AI_INTEGRATION_ADDED);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
commonSettings.writeTo(out);
out.writeOptionalEnum(SimilarityMeasure.translateSimilarity(similarity, out.getTransportVersion()));
out.writeOptionalVInt(dimensions);
out.writeOptionalVInt(maxInputTokens);
out.writeOptionalEnum(embeddingType);
out.writeBoolean(dimensionsSetByUser);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
VoyageAIEmbeddingsServiceSettings that = (VoyageAIEmbeddingsServiceSettings) o;
return Objects.equals(commonSettings, that.commonSettings)
&& Objects.equals(similarity, that.similarity)
&& Objects.equals(dimensions, that.dimensions)
&& Objects.equals(maxInputTokens, that.maxInputTokens)
&& Objects.equals(embeddingType, that.embeddingType)
&& Objects.equals(dimensionsSetByUser, that.dimensionsSetByUser);
}
@Override
public int hashCode() {
return Objects.hash(commonSettings, similarity, dimensions, maxInputTokens, embeddingType, dimensionsSetByUser);
}
}
| VoyageAIEmbeddingsServiceSettings |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/binding/ComponentDeclarations.java | {
"start": 16368,
"end": 16707
} | class ____ to {@link Key} but uses {@link TypeName} rather than {@code XType}.
*
* <p>We use {@code TypeName} rather than {@code XType} here because we can lose variance
* information when unwrapping an {@code XType} in KSP (b/352142595), and using {@code TypeName}
* avoids this issue.
*/
@AutoValue
abstract static | similar |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/LockOnNonEnclosingClassLiteralTest.java | {
"start": 2919,
"end": 3162
} | class ____ {
public void methodContainsSynchronizedBlock() {
synchronized (SubClass.class) {
}
}
}
}\
""")
.doTest();
}
}
| SubClass |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit/jupiter/TestConstructorIntegrationTests.java | {
"start": 1922,
"end": 3389
} | class ____ {
@BeforeEach
@AfterEach
void clearSpringProperty() {
setSpringProperty(null);
}
@Test
void autowireModeNotSetToAll() {
EngineTestKit.engine("junit-jupiter")
.selectors(selectClass(AutomaticallyAutowiredTestCase.class))
.execute()
.testEvents()
.assertStatistics(stats -> stats.started(1).succeeded(0).failed(1))
.assertThatEvents().haveExactly(1, event(test("test"),
finishedWithFailure(
instanceOf(ParameterResolutionException.class),
message(msg -> msg.matches(".+for parameter \\[java\\.lang\\.String .+\\] in constructor.+")))));
}
@Test
void autowireModeSetToAllViaSpringProperties() {
setSpringProperty("all");
EngineTestKit.engine("junit-jupiter")
.selectors(selectClass(AutomaticallyAutowiredTestCase.class))
.execute()
.testEvents()
.assertStatistics(stats -> stats.started(1).succeeded(1).failed(0));
}
@Test
void autowireModeSetToAllViaJUnitPlatformConfigurationParameter() {
EngineTestKit.engine("junit-jupiter")
.selectors(selectClass(AutomaticallyAutowiredTestCase.class))
.configurationParameter(TEST_CONSTRUCTOR_AUTOWIRE_MODE_PROPERTY_NAME, "all")
.execute()
.testEvents()
.assertStatistics(stats -> stats.started(1).succeeded(1).failed(0));
}
private void setSpringProperty(String flag) {
SpringProperties.setProperty(TEST_CONSTRUCTOR_AUTOWIRE_MODE_PROPERTY_NAME, flag);
}
@SpringJUnitConfig
@FailingTestCase
static | TestConstructorIntegrationTests |
java | quarkusio__quarkus | extensions/smallrye-jwt/deployment/src/test/java/io/quarkus/jwt/test/SignSecretKeyInlinedUnitTest.java | {
"start": 287,
"end": 982
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClass(DefaultGroupsEndpoint.class)
.addAsResource("secretKey.jwk")
.addAsResource("applicationSignSecretKeyInlined.properties", "application.properties"));
@Test
public void echoGroups() {
String token = Jwt.upn("upn").groups("User").sign();
RestAssured.given().auth()
.oauth2(token)
.get("/endp/echo")
.then().assertThat().statusCode(200)
.body(equalTo("User"));
}
}
| SignSecretKeyInlinedUnitTest |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/form/ClientFormParamFromMethodTest.java | {
"start": 704,
"end": 2513
} | class ____ {
@TestHTTPResource
URI baseUri;
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar.addClasses(Client.class, SubClient.class, Resource.class, ComputedParam.class));
@Test
void shouldUseValuesOnlyFromClass() {
Client client = RestClientBuilder.newBuilder().baseUri(baseUri)
.build(Client.class);
assertThat(client.setFromClass()).isEqualTo("1/");
}
@Test
void shouldUseValuesFromClassAndMethod() {
Client client = RestClientBuilder.newBuilder().baseUri(baseUri)
.build(Client.class);
assertThat(client.setFromMethodAndClass()).isEqualTo("1/2");
}
@Test
void shouldUseValuesFromMethodWithParam() {
Client client = RestClientBuilder.newBuilder().baseUri(baseUri)
.build(Client.class);
assertThat(client.setFromMethodWithParam()).isEqualTo("-11/-2");
}
@Test
void shouldUseValuesFromFormParam() {
Client client = RestClientBuilder.newBuilder().baseUri(baseUri)
.build(Client.class);
assertThat(client.setFromFormParam("111")).isEqualTo("111/2");
}
@Test
void shouldUseValuesFromFormParams() {
Client client = RestClientBuilder.newBuilder().baseUri(baseUri)
.build(Client.class);
assertThat(client.setFromFormParams("111", "222")).isEqualTo("111/222");
}
@Test
void shouldUseValuesFromSubclientAnnotations() {
Client client = RestClientBuilder.newBuilder().baseUri(baseUri)
.build(Client.class);
assertThat(client.sub().sub("22")).isEqualTo("11/22");
}
@Path("/")
@ApplicationScoped
public static | ClientFormParamFromMethodTest |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/window/grouping/HeapWindowsGroupingTest.java | {
"start": 17290,
"end": 18770
} | class ____ implements RowIterator<BinaryRowData> {
private BinaryRowData row = new BinaryRowData(1);
private BinaryRowWriter writer = new BinaryRowWriter(row);
private List<Long> assignedWindowStart;
private int count;
TestInputIterator(Long[] assignedWindowStart) {
this.assignedWindowStart = Arrays.asList(assignedWindowStart);
this.assignedWindowStart.sort(
(o1, o2) -> {
if (o1 == null && o2 == null) {
return 0;
} else {
if (o1 == null) {
return -1;
}
if (o2 == null) {
return 1;
}
return (int) (o1 - o2);
}
});
this.count = 0;
}
@Override
public boolean advanceNext() {
return count < assignedWindowStart.size();
}
@Override
public BinaryRowData getRow() {
writer.reset();
if (assignedWindowStart.get(count) == null) {
writer.setNullAt(0);
} else {
writer.writeLong(0, assignedWindowStart.get(count));
}
writer.complete();
count++;
return row;
}
}
}
| TestInputIterator |
java | elastic__elasticsearch | x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TextStructExecutor.java | {
"start": 797,
"end": 826
} | class ____ we can
*/
public | when |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/timelineservice/TestServiceTimelinePublisher.java | {
"start": 11117,
"end": 12539
} | class ____ extends TimelineV2ClientImpl {
private Map<Identifier, TimelineEntity> lastPublishedEntities =
new HashMap<>();
public DummyTimelineClient(ApplicationId appId) {
super(appId);
}
@Override
public void putEntitiesAsync(TimelineEntity... entities)
throws IOException, YarnException {
putEntities(entities);
}
@Override
public void putEntities(TimelineEntity... entities)
throws IOException, YarnException {
for (TimelineEntity timelineEntity : entities) {
TimelineEntity entity =
lastPublishedEntities.get(timelineEntity.getIdentifier());
if (entity == null) {
lastPublishedEntities.put(timelineEntity.getIdentifier(),
timelineEntity);
} else {
entity.addMetrics(timelineEntity.getMetrics());
entity.addEvents(timelineEntity.getEvents());
entity.addInfo(timelineEntity.getInfo());
entity.addConfigs(timelineEntity.getConfigs());
entity.addRelatesToEntities(timelineEntity.getRelatesToEntities());
entity
.addIsRelatedToEntities(timelineEntity.getIsRelatedToEntities());
}
}
}
public Collection<TimelineEntity> getLastPublishedEntities() {
return lastPublishedEntities.values();
}
public void reset() {
lastPublishedEntities = null;
}
}
}
| DummyTimelineClient |
java | google__dagger | javatests/dagger/internal/codegen/MapKeyProcessorTest.java | {
"start": 3595,
"end": 4663
} | enum ____ {",
" ADMIN,",
" LOGIN;",
"}");
CompilerTests.daggerCompiler(enumKeyFile, pathEnumFile)
.withAdditionalJavacProcessors(new AutoAnnotationProcessor())
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
// TODO(b/264464791): There is no AutoAnnotationProcessor for KSP.
assume().that(CompilerTests.backend(subject)).isNotEqualTo(Backend.KSP);
subject.hasErrorCount(0);
subject.generatedSource(goldenFileRule.goldenSource("test/Container_PathKeyCreator"));
});
}
@Test
public void nestedComplexMapKey_buildSuccessfully() {
Source outerKey =
CompilerTests.javaSource(
"test.OuterKey",
"package test;",
"import dagger.MapKey;",
"import java.lang.annotation.Retention;",
"import static java.lang.annotation.RetentionPolicy.RUNTIME;",
"",
"@MapKey(unwrapValue = false)",
"public @ | PathEnum |
java | spring-projects__spring-security | web/src/test/java/org/springframework/security/web/transport/HttpsRedirectFilterTests.java | {
"start": 1927,
"end": 6248
} | class ____ {
HttpsRedirectFilter filter;
@Mock
FilterChain chain;
@BeforeEach
public void configureFilter() {
this.filter = new HttpsRedirectFilter();
}
@Test
public void filterWhenRequestIsInsecureThenRedirects() throws Exception {
HttpServletRequest request = get("http://localhost");
HttpServletResponse response = ok();
this.filter.doFilter(request, response, this.chain);
assertThat(statusCode(response)).isEqualTo(302);
assertThat(redirectedUrl(response)).isEqualTo("https://localhost");
}
@Test
public void filterWhenExchangeIsSecureThenNoRedirect() throws Exception {
HttpServletRequest request = get("https://localhost");
HttpServletResponse response = ok();
this.filter.doFilter(request, response, this.chain);
assertThat(statusCode(response)).isEqualTo(200);
}
@Test
public void filterWhenExchangeMismatchesThenNoRedirect() throws Exception {
RequestMatcher matcher = mock(RequestMatcher.class);
this.filter.setRequestMatcher(matcher);
HttpServletRequest request = get("http://localhost:8080");
HttpServletResponse response = ok();
this.filter.doFilter(request, response, this.chain);
assertThat(statusCode(response)).isEqualTo(200);
}
@Test
public void filterWhenExchangeMatchesAndRequestIsInsecureThenRedirects() throws Exception {
RequestMatcher matcher = mock(RequestMatcher.class);
given(matcher.matches(any())).willReturn(true);
this.filter.setRequestMatcher(matcher);
HttpServletRequest request = get("http://localhost:8080");
HttpServletResponse response = ok();
this.filter.doFilter(request, response, this.chain);
assertThat(statusCode(response)).isEqualTo(302);
assertThat(redirectedUrl(response)).isEqualTo("https://localhost:8443");
verify(matcher).matches(any(HttpServletRequest.class));
}
@Test
public void filterWhenRequestIsInsecureThenPortMapperRemapsPort() throws Exception {
PortMapper portMapper = mock(PortMapper.class);
given(portMapper.lookupHttpsPort(314)).willReturn(159);
this.filter.setPortMapper(portMapper);
HttpServletRequest request = get("http://localhost:314");
HttpServletResponse response = ok();
this.filter.doFilter(request, response, this.chain);
assertThat(statusCode(response)).isEqualTo(302);
assertThat(redirectedUrl(response)).isEqualTo("https://localhost:159");
verify(portMapper).lookupHttpsPort(314);
}
@Test
public void filterWhenRequestIsInsecureAndNoPortMappingThenThrowsIllegalState() {
HttpServletRequest request = get("http://localhost:1234");
HttpServletResponse response = ok();
assertThatIllegalStateException().isThrownBy(() -> this.filter.doFilter(request, response, this.chain));
}
@Test
public void filterWhenInsecureRequestHasAPathThenRedirects() throws Exception {
HttpServletRequest request = get("http://localhost:8080/path/page.html?query=string");
HttpServletResponse response = ok();
this.filter.doFilter(request, response, this.chain);
assertThat(statusCode(response)).isEqualTo(302);
assertThat(redirectedUrl(response)).isEqualTo("https://localhost:8443/path/page.html?query=string");
}
@Test
public void setRequiresTransportSecurityMatcherWhenSetWithNullValueThenThrowsIllegalArgument() {
assertThatIllegalArgumentException().isThrownBy(() -> this.filter.setRequestMatcher(null));
}
@Test
public void setPortMapperWhenSetWithNullValueThenThrowsIllegalArgument() {
assertThatIllegalArgumentException().isThrownBy(() -> this.filter.setPortMapper(null));
}
private String redirectedUrl(HttpServletResponse response) {
return response.getHeader(HttpHeaders.LOCATION);
}
private int statusCode(HttpServletResponse response) {
return response.getStatus();
}
private HttpServletRequest get(String uri) {
UriComponents components = UriComponentsBuilder.fromUriString(uri).build();
MockHttpServletRequest request = new MockHttpServletRequest("GET", components.getPath());
request.setQueryString(components.getQuery());
if (components.getScheme() != null) {
request.setScheme(components.getScheme());
}
int port = components.getPort();
if (port != -1) {
request.setServerPort(port);
}
return request;
}
private HttpServletResponse ok() {
MockHttpServletResponse response = new MockHttpServletResponse();
response.setStatus(200);
return response;
}
}
| HttpsRedirectFilterTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/AvgFunction.java | {
"start": 7477,
"end": 8904
} | class ____ implements FunctionReturnTypeResolver {
private final BasicType<Double> doubleType;
public ReturnTypeResolver(TypeConfiguration typeConfiguration) {
doubleType = typeConfiguration.getBasicTypeRegistry().resolve( StandardBasicTypes.DOUBLE );
}
@Override
public BasicValuedMapping resolveFunctionReturnType(
Supplier<BasicValuedMapping> impliedTypeAccess,
List<? extends SqlAstNode> arguments) {
final var impliedType = impliedTypeAccess.get();
if ( impliedType != null ) {
return impliedType;
}
else {
var expression = (Expression) arguments.get( 0 );
final var jdbcMapping = expression.getExpressionType().getSingleJdbcMapping();
return jdbcMapping instanceof BasicPluralType<?, ?>
? (BasicValuedMapping) jdbcMapping
: doubleType;
}
}
@Override
public ReturnableType<?> resolveFunctionReturnType(
ReturnableType<?> impliedType,
@Nullable SqmToSqlAstConverter converter,
List<? extends SqmTypedNode<?>> arguments,
TypeConfiguration typeConfiguration) {
final var expressible = arguments.get( 0 ).getExpressible();
if ( expressible != null ) {
final var domainType = expressible.getSqmType();
if ( domainType instanceof BasicPluralType<?, ?> ) {
return (ReturnableType<?>) domainType;
}
}
return typeConfiguration.getBasicTypeRegistry().resolve( StandardBasicTypes.DOUBLE );
}
}
}
| ReturnTypeResolver |
java | elastic__elasticsearch | x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java | {
"start": 9682,
"end": 30752
} | class ____ extends EnvironmentAwareCommand {
// Common option for multiple commands.
// Not every command uses every option, but where they are common we want to keep them consistent
final OptionSpec<String> outputPathSpec;
final OptionSpec<String> outputPasswordSpec;
final OptionSpec<Integer> keysizeSpec;
OptionSpec<String> caKeyUsageSpec;
OptionSpec<Void> pemFormatSpec;
OptionSpec<Integer> daysSpec;
OptionSpec<String> caPkcs12PathSpec;
OptionSpec<String> caCertPathSpec;
OptionSpec<String> caKeyPathSpec;
OptionSpec<String> caPasswordSpec;
OptionSpec<String> caDnSpec;
OptionSpec<Void> multipleNodesSpec;
OptionSpec<String> nameSpec;
OptionSpec<String> dnsNamesSpec;
OptionSpec<String> ipAddressesSpec;
OptionSpec<String> inputFileSpec;
CertificateCommand(String description) {
super(description);
outputPathSpec = parser.accepts("out", "path to the output file that should be produced").withRequiredArg();
outputPasswordSpec = parser.accepts("pass", "password for generated private keys").withOptionalArg();
keysizeSpec = parser.accepts("keysize", "size in bits of RSA keys").withRequiredArg().ofType(Integer.class);
}
final void acceptCertificateGenerationOptions() {
pemFormatSpec = parser.accepts("pem", "output certificates and keys in PEM format instead of PKCS#12");
daysSpec = parser.accepts("days", "number of days that the generated certificates are valid")
.withRequiredArg()
.ofType(Integer.class);
}
final void acceptsCertificateAuthority() {
caPkcs12PathSpec = parser.accepts("ca", "path to an existing ca key pair (in PKCS#12 format)").withRequiredArg();
caCertPathSpec = parser.accepts("ca-cert", "path to an existing ca certificate")
.availableUnless(caPkcs12PathSpec)
.withRequiredArg();
caKeyPathSpec = parser.accepts("ca-key", "path to an existing ca private key")
.availableIf(caCertPathSpec)
.requiredIf(caCertPathSpec)
.withRequiredArg();
caPasswordSpec = parser.accepts("ca-pass", "password for an existing ca private key or the generated ca private key")
.withOptionalArg();
acceptsCertificateAuthorityName();
}
void acceptsCertificateAuthorityName() {
OptionSpecBuilder builder = parser.accepts(
"ca-dn",
"distinguished name to use for the generated ca. defaults to " + AUTO_GEN_CA_DN
);
if (caPkcs12PathSpec != null) {
builder = builder.availableUnless(caPkcs12PathSpec);
}
if (caCertPathSpec != null) {
builder = builder.availableUnless(caCertPathSpec);
}
caDnSpec = builder.withRequiredArg();
}
final void acceptInstanceDetails() {
multipleNodesSpec = parser.accepts("multiple", "generate files for multiple instances");
nameSpec = parser.accepts("name", "name of the generated certificate").availableUnless(multipleNodesSpec).withRequiredArg();
dnsNamesSpec = parser.accepts("dns", "comma separated DNS names").availableUnless(multipleNodesSpec).withRequiredArg();
ipAddressesSpec = parser.accepts("ip", "comma separated IP addresses").availableUnless(multipleNodesSpec).withRequiredArg();
}
final void acceptInputFile() {
inputFileSpec = parser.accepts("in", "file containing details of the instances in yaml format").withRequiredArg();
}
final void acceptCertificateAuthorityKeyUsage() {
caKeyUsageSpec = parser.accepts(
"keyusage",
"comma separated key usages to use for the generated CA. "
+ "defaults to '"
+ Strings.collectionToCommaDelimitedString(DEFAULT_CA_KEY_USAGE)
+ "'"
).withRequiredArg();
}
// For testing
OptionParser getParser() {
return parser;
}
/**
* Checks for output file in the user specified options or prompts the user for the output file.
* The resulting path is stored in the {@code config} parameter.
*/
Path resolveOutputPath(Terminal terminal, OptionSet options, String defaultFilename) throws IOException {
return resolveOutputPath(terminal, outputPathSpec.value(options), defaultFilename);
}
static Path resolveOutputPath(Terminal terminal, String userOption, String defaultFilename) {
Path file;
if (userOption != null) {
file = CertificateTool.resolvePath(userOption);
} else {
file = CertificateTool.resolvePath(defaultFilename);
String input = terminal.readText("Please enter the desired output file [" + file + "]: ");
if (input.isEmpty() == false) {
file = CertificateTool.resolvePath(input);
}
}
return file.toAbsolutePath();
}
final int getKeySize(OptionSet options) {
if (options.has(keysizeSpec)) {
return keysizeSpec.value(options);
} else {
return DEFAULT_KEY_SIZE;
}
}
final List<String> getCaKeyUsage(OptionSet options) {
if (options.has(caKeyUsageSpec)) {
final Function<String, Stream<? extends String>> splitByComma = v -> Stream.of(Strings.splitStringByCommaToArray(v));
final List<String> caKeyUsage = caKeyUsageSpec.values(options)
.stream()
.flatMap(splitByComma)
.filter(v -> false == Strings.isNullOrEmpty(v))
.toList();
if (caKeyUsage.isEmpty()) {
return DEFAULT_CA_KEY_USAGE;
}
return caKeyUsage;
} else {
return DEFAULT_CA_KEY_USAGE;
}
}
final int getDays(OptionSet options) {
if (options.has(daysSpec)) {
return daysSpec.value(options);
} else {
return DEFAULT_DAYS;
}
}
boolean usePemFormat(OptionSet options) {
return options.has(pemFormatSpec);
}
boolean useOutputPassword(OptionSet options) {
return options.has(outputPasswordSpec);
}
char[] getOutputPassword(OptionSet options) {
return getChars(outputPasswordSpec.value(options));
}
protected Path resolvePath(OptionSet options, OptionSpec<String> spec) {
final String value = spec.value(options);
if (Strings.isNullOrEmpty(value)) {
return null;
}
return CertificateTool.resolvePath(value);
}
/**
* Returns the CA certificate and private key that will be used to sign certificates. These may be specified by the user or
* automatically generated
*
* @return CA cert and private key
*/
CAInfo getCAInfo(Terminal terminal, OptionSet options, Environment env) throws Exception {
if (options.has(caPkcs12PathSpec)) {
return loadPkcs12CA(terminal, options, env);
} else if (options.has(caCertPathSpec)) {
return loadPemCA(terminal, options, env);
} else {
terminal.println("Note: Generating certificates without providing a CA certificate is deprecated.");
terminal.println(" A CA certificate will become mandatory in the next major release.");
terminal.println("");
return generateCA(terminal, options);
}
}
private CAInfo loadPkcs12CA(Terminal terminal, OptionSet options, Environment env) throws Exception {
Path path = resolvePath(options, caPkcs12PathSpec);
char[] passwordOption = getChars(caPasswordSpec.value(options));
Map<Certificate, Key> keys = withPassword(
"CA (" + path + ")",
passwordOption,
terminal,
false,
password -> CertParsingUtils.readPkcs12KeyPairs(path, password, a -> password)
);
if (keys.size() != 1) {
throw new IllegalArgumentException(
"expected a single key in file [" + path.toAbsolutePath() + "] but found [" + keys.size() + "]"
);
}
final Map.Entry<Certificate, Key> pair = keys.entrySet().iterator().next();
return new CAInfo((X509Certificate) pair.getKey(), (PrivateKey) pair.getValue());
}
private CAInfo loadPemCA(Terminal terminal, OptionSet options, Environment env) throws Exception {
if (options.hasArgument(caKeyPathSpec) == false) {
throw new UserException(ExitCodes.USAGE, "Option " + caCertPathSpec + " also requires " + caKeyPathSpec);
}
Path cert = resolvePath(options, caCertPathSpec);
Path key = resolvePath(options, caKeyPathSpec);
String password = caPasswordSpec.value(options);
X509Certificate caCert = CertParsingUtils.readX509Certificate(cert);
PrivateKey privateKey = readPrivateKey(key, getChars(password), terminal);
return new CAInfo(caCert, privateKey);
}
CAInfo generateCA(Terminal terminal, OptionSet options) throws Exception {
String dn = caDnSpec.value(options);
if (Strings.isNullOrEmpty(dn)) {
dn = AUTO_GEN_CA_DN;
}
X500Principal x500Principal = new X500Principal(dn);
KeyPair keyPair = CertGenUtils.generateKeyPair(getKeySize(options));
final KeyUsage caKeyUsage = CertGenUtils.buildKeyUsage(getCaKeyUsage(options));
X509Certificate caCert = CertGenUtils.generateCACertificate(x500Principal, keyPair, getDays(options), caKeyUsage);
if (options.hasArgument(caPasswordSpec)) {
char[] password = getChars(caPasswordSpec.value(options));
checkAndConfirmPasswordLengthForOpenSSLCompatibility(password, terminal, false);
return new CAInfo(caCert, keyPair.getPrivate(), true, password);
}
if (options.has(caPasswordSpec)) {
return withPassword("CA Private key", null, terminal, true, p -> new CAInfo(caCert, keyPair.getPrivate(), true, p.clone()));
}
return new CAInfo(caCert, keyPair.getPrivate(), true, null);
}
/**
* This method handles the collection of information about each instance that is necessary to generate a certificate. The user may
* be prompted or the information can be gathered from a file
*
* @return a {@link Collection} of {@link CertificateInformation} that represents each instance
*/
Collection<CertificateInformation> getCertificateInformationList(Terminal terminal, OptionSet options) throws Exception {
final Path input = resolvePath(options, inputFileSpec);
if (input != null) {
return parseAndValidateFile(terminal, input.toAbsolutePath());
}
if (options.has(multipleNodesSpec)) {
return readMultipleCertificateInformation(terminal);
} else {
final Function<String, Stream<? extends String>> splitByComma = v -> Arrays.stream(Strings.splitStringByCommaToArray(v));
final List<String> dns = dnsNamesSpec.values(options).stream().flatMap(splitByComma).collect(Collectors.toList());
final List<String> ip = ipAddressesSpec.values(options).stream().flatMap(splitByComma).collect(Collectors.toList());
final List<String> cn = null;
final String name = getCertificateName(options);
final String fileName;
if (Name.isValidFilename(name)) {
fileName = name;
} else {
fileName = requestFileName(terminal, name);
}
CertificateInformation information = new CertificateInformation(name, fileName, ip, dns, cn);
List<String> validationErrors = information.validate();
if (validationErrors.isEmpty()) {
return Collections.singleton(information);
} else {
validationErrors.forEach(terminal::errorPrintln);
return Collections.emptyList();
}
}
}
protected String getCertificateName(OptionSet options) {
return options.has(nameSpec) ? nameSpec.value(options) : DEFAULT_CERT_NAME;
}
static Collection<CertificateInformation> readMultipleCertificateInformation(Terminal terminal) {
Map<String, CertificateInformation> map = new HashMap<>();
boolean done = false;
while (done == false) {
String name = terminal.readText("Enter instance name: ");
if (name.isEmpty() == false) {
String filename = requestFileName(terminal, name);
String ipAddresses = terminal.readText("Enter IP Addresses for instance (comma-separated if more than one) []: ");
String dnsNames = terminal.readText("Enter DNS names for instance (comma-separated if more than one) []: ");
List<String> ipList = Arrays.asList(Strings.splitStringByCommaToArray(ipAddresses));
List<String> dnsList = Arrays.asList(Strings.splitStringByCommaToArray(dnsNames));
List<String> commonNames = null;
CertificateInformation information = new CertificateInformation(name, filename, ipList, dnsList, commonNames);
List<String> validationErrors = information.validate();
if (validationErrors.isEmpty()) {
if (map.containsKey(name)) {
terminal.println("Overwriting previously defined instance information [" + name + "]");
}
map.put(name, information);
} else {
for (String validationError : validationErrors) {
terminal.println(validationError);
}
terminal.println("Skipping entry as invalid values were found");
}
} else {
terminal.println("A name must be provided");
}
String exit = terminal.readText(
"Would you like to specify another instance? Press 'y' to continue entering instance " + "information: "
);
if ("y".equals(exit) == false) {
done = true;
}
}
return map.values();
}
private static String requestFileName(Terminal terminal, String certName) {
final boolean isNameValidFilename = Name.isValidFilename(certName);
while (true) {
String filename = terminal.readText(
"Enter name for directories and files of " + certName + (isNameValidFilename ? " [" + certName + "]" : "") + ": "
);
if (filename.isEmpty() && isNameValidFilename) {
return certName;
}
if (Name.isValidFilename(filename)) {
return filename;
} else {
terminal.errorPrintln(Terminal.Verbosity.SILENT, "'" + filename + "' is not a valid filename");
continue;
}
}
}
/**
* This method handles writing out the certificate authority in PEM format to a zip file.
*
* @param outputStream the output stream to write to
* @param pemWriter the writer for PEM objects
* @param info the certificate authority information
* @param includeKey if true, write the CA key in PEM format
*/
static void writeCAInfo(ZipOutputStream outputStream, JcaPEMWriter pemWriter, CAInfo info, boolean includeKey) throws Exception {
final String caDirName = createCaDirectory(outputStream);
outputStream.putNextEntry(new ZipEntry(caDirName + "ca.crt"));
pemWriter.writeObject(info.certAndKey.cert);
pemWriter.flush();
outputStream.closeEntry();
if (includeKey) {
outputStream.putNextEntry(new ZipEntry(caDirName + "ca.key"));
if (info.password != null && info.password.length > 0) {
try {
PEMEncryptor encryptor = getEncrypter(info.password);
pemWriter.writeObject(info.certAndKey.key, encryptor);
} finally {
// we can safely nuke the password chars now
Arrays.fill(info.password, (char) 0);
}
} else {
pemWriter.writeObject(info.certAndKey.key);
}
pemWriter.flush();
outputStream.closeEntry();
}
}
private static String createCaDirectory(ZipOutputStream outputStream) throws IOException {
final String caDirName = "ca/";
ZipEntry zipEntry = new ZipEntry(caDirName);
assert zipEntry.isDirectory();
outputStream.putNextEntry(zipEntry);
return caDirName;
}
static void writePkcs12(
String fileName,
OutputStream output,
String alias,
CertificateAndKey pair,
X509Certificate caCert,
char[] password,
Terminal terminal
) throws Exception {
final KeyStore pkcs12 = KeyStore.getInstance("PKCS12");
pkcs12.load(null);
withPassword(fileName, password, terminal, true, p12Password -> {
if (isAscii(p12Password)) {
pkcs12.setKeyEntry(alias, pair.key, p12Password, new Certificate[] { pair.cert });
if (caCert != null) {
pkcs12.setCertificateEntry("ca", caCert);
}
pkcs12.store(output, p12Password);
return null;
} else {
throw new UserException(ExitCodes.CONFIG, "PKCS#12 passwords must be plain ASCII");
}
});
}
/**
* Verify that the provided certificate is validly signed by the provided CA
*/
static void verifyIssuer(Certificate certificate, CAInfo caInfo, Terminal terminal) throws UserException {
try {
certificate.verify(caInfo.certAndKey.cert.getPublicKey());
} catch (GeneralSecurityException e) {
terminal.errorPrintln("");
terminal.errorPrintln("* ERROR *");
terminal.errorPrintln("Verification of generated certificate failed.");
terminal.errorPrintln("This usually occurs if the provided CA certificate does not match with the CA key.");
terminal.errorPrintln("Cause: " + e);
for (var c = e.getCause(); c != null; c = c.getCause()) {
terminal.errorPrintln(" - " + c);
}
throw new UserException(ExitCodes.CONFIG, "Certificate verification failed");
}
}
protected void writePemPrivateKey(
Terminal terminal,
OptionSet options,
ZipOutputStream outputStream,
JcaPEMWriter pemWriter,
String keyFileName,
PrivateKey privateKey
) throws IOException {
final boolean usePassword = useOutputPassword(options);
final char[] outputPassword = getOutputPassword(options);
outputStream.putNextEntry(new ZipEntry(keyFileName));
if (usePassword) {
withPassword(keyFileName, outputPassword, terminal, true, password -> {
pemWriter.writeObject(privateKey, getEncrypter(password));
return null;
});
} else {
pemWriter.writeObject(privateKey);
}
pemWriter.flush();
outputStream.closeEntry();
}
}
static | CertificateCommand |
java | apache__flink | flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/SingleThreadMultiplexSourceReaderBase.java | {
"start": 3198,
"end": 6923
} | class ____<
E, T, SplitT extends SourceSplit, SplitStateT>
extends SourceReaderBase<E, T, SplitT, SplitStateT> {
/**
* The primary constructor for the source reader.
*
* <p>The reader will use a handover queue sized as configured via {@link
* SourceReaderOptions#ELEMENT_QUEUE_CAPACITY}.
*/
public SingleThreadMultiplexSourceReaderBase(
Supplier<SplitReader<E, SplitT>> splitReaderSupplier,
RecordEmitter<E, T, SplitStateT> recordEmitter,
Configuration config,
SourceReaderContext context) {
super(
new SingleThreadFetcherManager<>(splitReaderSupplier, config),
recordEmitter,
config,
context);
}
/**
* This constructor behaves like {@link #SingleThreadMultiplexSourceReaderBase(Supplier,
* RecordEmitter, Configuration, SourceReaderContext)}, but accepts a specific {@link
* RateLimiterStrategy}.
*/
public SingleThreadMultiplexSourceReaderBase(
Supplier<SplitReader<E, SplitT>> splitReaderSupplier,
RecordEmitter<E, T, SplitStateT> recordEmitter,
Configuration config,
SourceReaderContext context,
@Nullable RateLimiterStrategy<SplitT> rateLimiterStrategy) {
super(
new SingleThreadFetcherManager<>(splitReaderSupplier, config),
recordEmitter,
null,
config,
context,
rateLimiterStrategy);
}
/**
* This constructor behaves like {@link #SingleThreadMultiplexSourceReaderBase(Supplier,
* RecordEmitter, Configuration, SourceReaderContext)}, but accepts a specific {@link
* SingleThreadFetcherManager}.
*/
public SingleThreadMultiplexSourceReaderBase(
SingleThreadFetcherManager<E, SplitT> splitFetcherManager,
RecordEmitter<E, T, SplitStateT> recordEmitter,
Configuration config,
SourceReaderContext context) {
super(splitFetcherManager, recordEmitter, config, context);
}
/**
* This constructor behaves like {@link #SingleThreadMultiplexSourceReaderBase(Supplier,
* RecordEmitter, Configuration, SourceReaderContext)}, but accepts a specific {@link
* SingleThreadFetcherManager} and {@link RecordEvaluator}.
*/
public SingleThreadMultiplexSourceReaderBase(
SingleThreadFetcherManager<E, SplitT> splitFetcherManager,
RecordEmitter<E, T, SplitStateT> recordEmitter,
@Nullable RecordEvaluator<T> eofRecordEvaluator,
Configuration config,
SourceReaderContext context) {
super(splitFetcherManager, recordEmitter, eofRecordEvaluator, config, context);
}
/**
* This constructor behaves like {@link
* #SingleThreadMultiplexSourceReaderBase(SingleThreadFetcherManager, RecordEmitter,
* RecordEvaluator, Configuration, SourceReaderContext)}, but accepts a specific {@link
* RateLimiterStrategy}.
*/
public SingleThreadMultiplexSourceReaderBase(
SingleThreadFetcherManager<E, SplitT> splitFetcherManager,
RecordEmitter<E, T, SplitStateT> recordEmitter,
@Nullable RecordEvaluator<T> eofRecordEvaluator,
Configuration config,
SourceReaderContext context,
@Nullable RateLimiterStrategy<SplitT> rateLimiterStrategy) {
super(
splitFetcherManager,
recordEmitter,
eofRecordEvaluator,
config,
context,
rateLimiterStrategy);
}
}
| SingleThreadMultiplexSourceReaderBase |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/spi/ThreadContextMapFactory.java | {
"start": 2065,
"end": 2196
} | class ____ {
/**
* Initializes static variables based on system properties. Normally called when this | ThreadContextMapFactory |
java | micronaut-projects__micronaut-core | json-core/src/main/java/io/micronaut/json/codec/JsonStreamMediaTypeCodec.java | {
"start": 1497,
"end": 3671
} | class ____ extends JsonMediaTypeCodec {
public static final String CONFIGURATION_QUALIFIER = "json-stream";
private final List<MediaType> streamAdditionalTypes;
/**
* @param jsonMapper To read/write JSON
* @param applicationConfiguration The common application configurations
* @param codecConfiguration The configuration for the codec
*/
public JsonStreamMediaTypeCodec(JsonMapper jsonMapper,
ApplicationConfiguration applicationConfiguration,
@Named(CONFIGURATION_QUALIFIER) @Nullable CodecConfiguration codecConfiguration) {
super(jsonMapper, applicationConfiguration, null);
if (codecConfiguration != null) {
this.streamAdditionalTypes = codecConfiguration.getAdditionalTypes();
} else {
this.streamAdditionalTypes = Collections.emptyList();
}
}
/**
* @param jsonCodec To read/write JSON
* @param applicationConfiguration The common application configurations
* @param codecConfiguration The configuration for the codec
*/
@Inject
public JsonStreamMediaTypeCodec(BeanProvider<JsonMapper> jsonCodec,
ApplicationConfiguration applicationConfiguration,
@Named(CONFIGURATION_QUALIFIER) @Nullable CodecConfiguration codecConfiguration) {
super(jsonCodec, applicationConfiguration, null);
if (codecConfiguration != null) {
this.streamAdditionalTypes = codecConfiguration.getAdditionalTypes();
} else {
this.streamAdditionalTypes = Collections.emptyList();
}
}
@Override
public Collection<MediaType> getMediaTypes() {
var mediaTypes = new ArrayList<MediaType>();
mediaTypes.add(MediaType.APPLICATION_JSON_STREAM_TYPE);
mediaTypes.addAll(streamAdditionalTypes);
return mediaTypes;
}
@Override
protected MapperMediaTypeCodec cloneWithMapper(JsonMapper mapper) {
return new JsonStreamMediaTypeCodec(mapper, applicationConfiguration, codecConfiguration);
}
}
| JsonStreamMediaTypeCodec |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/exec/spi/JdbcCallFunctionReturn.java | {
"start": 282,
"end": 357
} | interface ____ extends JdbcCallParameterRegistration {
}
| JdbcCallFunctionReturn |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2663/NullableHelper.java | {
"start": 232,
"end": 558
} | class ____ {
private NullableHelper() {
// Helper class
}
public static <T> Nullable<T> jsonNullableToNullable(JsonNullable<T> jsonNullable) {
if ( jsonNullable.isPresent() ) {
return Nullable.of( jsonNullable.get() );
}
return Nullable.undefined();
}
}
| NullableHelper |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/serde/DataTypeJsonSerializer.java | {
"start": 4503,
"end": 7743
} | class ____ only nested types contain custom conversion classes
if (!DataTypeUtils.isDefaultClass(dataType)) {
jsonGenerator.writeStringField(
FIELD_NAME_CONVERSION_CLASS, dataType.getConversionClass().getName());
}
// internal classes only contain nested internal classes
if (isInternal(dataType, false)) {
return;
}
switch (dataType.getLogicalType().getTypeRoot()) {
case ARRAY:
case MULTISET:
final CollectionDataType collectionDataType = (CollectionDataType) dataType;
serializeFieldIfNotDefaultClass(
collectionDataType.getElementDataType(),
FIELD_NAME_ELEMENT_CLASS,
jsonGenerator);
break;
case MAP:
final KeyValueDataType keyValueDataType = (KeyValueDataType) dataType;
serializeFieldIfNotDefaultClass(
keyValueDataType.getKeyDataType(), FIELD_NAME_KEY_CLASS, jsonGenerator);
serializeFieldIfNotDefaultClass(
keyValueDataType.getValueDataType(), FIELD_NAME_VALUE_CLASS, jsonGenerator);
break;
case ROW:
case STRUCTURED_TYPE:
final List<Field> nonDefaultFields =
DataType.getFields(dataType).stream()
.filter(
field ->
!DataTypeUtils.isDefaultClassNested(
field.getDataType()))
.collect(Collectors.toList());
if (nonDefaultFields.isEmpty()) {
break;
}
jsonGenerator.writeFieldName(FIELD_NAME_FIELDS);
jsonGenerator.writeStartArray();
for (Field nonDefaultField : nonDefaultFields) {
jsonGenerator.writeStartObject();
jsonGenerator.writeStringField(
FIELD_NAME_FIELD_NAME, nonDefaultField.getName());
serializeClass(nonDefaultField.getDataType(), jsonGenerator);
jsonGenerator.writeEndObject();
}
jsonGenerator.writeEndArray();
break;
case DISTINCT_TYPE:
final DataType sourceDataType = dataType.getChildren().get(0);
if (!DataTypeUtils.isDefaultClassNested(sourceDataType)) {
serializeClass(sourceDataType, jsonGenerator);
}
break;
default:
// for data types without children
}
}
private static void serializeFieldIfNotDefaultClass(
DataType dataType, String fieldName, JsonGenerator jsonGenerator) throws IOException {
if (!DataTypeUtils.isDefaultClassNested(dataType)) {
jsonGenerator.writeFieldName(fieldName);
jsonGenerator.writeStartObject();
serializeClass(dataType, jsonGenerator);
jsonGenerator.writeEndObject();
}
}
}
| if |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/CamelEvent.java | {
"start": 6777,
"end": 6966
} | interface ____ extends CamelEvent {
Exchange getExchange();
@Override
default Object getSource() {
return getExchange();
}
}
| ExchangeEvent |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java | {
"start": 4624,
"end": 14313
} | class ____ {
private ContainerState state;
// store enough information to be able to cleanup the container
private TaskAttemptId taskAttemptID;
private ContainerId containerID;
final private String containerMgrAddress;
public Container(TaskAttemptId taId, ContainerId containerID,
String containerMgrAddress) {
this.state = ContainerState.PREP;
this.taskAttemptID = taId;
this.containerMgrAddress = containerMgrAddress;
this.containerID = containerID;
}
public synchronized boolean isCompletelyDone() {
return state == ContainerState.DONE || state == ContainerState.FAILED;
}
public synchronized void done() {
state = ContainerState.DONE;
}
@SuppressWarnings("unchecked")
public synchronized void launch(ContainerRemoteLaunchEvent event) {
LOG.info("Launching " + taskAttemptID);
if(this.state == ContainerState.KILLED_BEFORE_LAUNCH) {
state = ContainerState.DONE;
sendContainerLaunchFailedMsg(taskAttemptID,
"Container was killed before it was launched");
return;
}
ContainerManagementProtocolProxyData proxy = null;
try {
proxy = getCMProxy(containerMgrAddress, containerID);
// Construct the actual Container
ContainerLaunchContext containerLaunchContext =
event.getContainerLaunchContext();
// Now launch the actual container
StartContainerRequest startRequest =
StartContainerRequest.newInstance(containerLaunchContext,
event.getContainerToken());
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(startRequest);
StartContainersRequest requestList = StartContainersRequest.newInstance(list);
StartContainersResponse response =
proxy.getContainerManagementProtocol().startContainers(requestList);
if (response.getFailedRequests() != null
&& response.getFailedRequests().containsKey(containerID)) {
throw response.getFailedRequests().get(containerID).deSerialize();
}
ByteBuffer portInfo =
response.getAllServicesMetaData().get(
ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID);
int port = -1;
if(portInfo != null) {
port = ShuffleHandler.deserializeMetaData(portInfo);
}
LOG.info("Shuffle port returned by ContainerManager for "
+ taskAttemptID + " : " + port);
if(port < 0) {
this.state = ContainerState.FAILED;
throw new IllegalStateException("Invalid shuffle port number "
+ port + " returned for " + taskAttemptID);
}
// after launching, send launched event to task attempt to move
// it from ASSIGNED to RUNNING state
context.getEventHandler().handle(
new TaskAttemptContainerLaunchedEvent(taskAttemptID, port));
this.state = ContainerState.RUNNING;
} catch (Throwable t) {
String message = "Container launch failed for " + containerID + " : "
+ StringUtils.stringifyException(t);
this.state = ContainerState.FAILED;
sendContainerLaunchFailedMsg(taskAttemptID, message);
} finally {
if (proxy != null) {
cmProxy.mayBeCloseProxy(proxy);
}
}
}
public void kill() {
kill(false);
}
@SuppressWarnings("unchecked")
public synchronized void kill(boolean dumpThreads) {
if(this.state == ContainerState.PREP) {
this.state = ContainerState.KILLED_BEFORE_LAUNCH;
} else if (!isCompletelyDone()) {
LOG.info("KILLING " + taskAttemptID);
ContainerManagementProtocolProxyData proxy = null;
try {
proxy = getCMProxy(this.containerMgrAddress, this.containerID);
if (dumpThreads) {
final SignalContainerRequest request = SignalContainerRequest
.newInstance(containerID,
SignalContainerCommand.OUTPUT_THREAD_DUMP);
proxy.getContainerManagementProtocol().signalToContainer(request);
}
// kill the remote container if already launched
List<ContainerId> ids = new ArrayList<ContainerId>();
ids.add(this.containerID);
StopContainersRequest request = StopContainersRequest.newInstance(ids);
StopContainersResponse response =
proxy.getContainerManagementProtocol().stopContainers(request);
if (response.getFailedRequests() != null
&& response.getFailedRequests().containsKey(this.containerID)) {
throw response.getFailedRequests().get(this.containerID)
.deSerialize();
}
} catch (Throwable t) {
// ignore the cleanup failure
String message = "cleanup failed for container "
+ this.containerID + " : "
+ StringUtils.stringifyException(t);
context.getEventHandler()
.handle(
new TaskAttemptDiagnosticsUpdateEvent(this.taskAttemptID,
message));
LOG.warn(message);
} finally {
if (proxy != null) {
cmProxy.mayBeCloseProxy(proxy);
}
}
this.state = ContainerState.DONE;
}
// after killing, send killed event to task attempt
context.getEventHandler().handle(
new TaskAttemptEvent(this.taskAttemptID,
TaskAttemptEventType.TA_CONTAINER_CLEANED));
}
}
public ContainerLauncherImpl(AppContext context) {
super(ContainerLauncherImpl.class.getName());
this.context = context;
this.stopped = new AtomicBoolean(false);
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.limitOnPoolSize = conf.getInt(
MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,
MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
LOG.info("Upper limit on the thread pool size is " + this.limitOnPoolSize);
this.initialPoolSize = conf.getInt(
MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE,
MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE);
LOG.info("The thread pool initial size is " + this.initialPoolSize);
super.serviceInit(conf);
cmProxy = new ContainerManagementProtocolProxy(conf);
}
protected void serviceStart() throws Exception {
ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(
"ContainerLauncher #%d").setDaemon(true).build();
// Start with a default core-pool size of 10 and change it dynamically.
launcherPool = new HadoopThreadPoolExecutor(initialPoolSize,
Integer.MAX_VALUE, 1, TimeUnit.HOURS,
new LinkedBlockingQueue<Runnable>(),
tf);
eventHandlingThread = new SubjectInheritingThread() {
@Override
public void work() {
ContainerLauncherEvent event = null;
Set<String> allNodes = new HashSet<String>();
while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
try {
event = eventQueue.take();
} catch (InterruptedException e) {
if (!stopped.get()) {
LOG.error("Returning, interrupted : " + e);
}
return;
}
allNodes.add(event.getContainerMgrAddress());
int poolSize = launcherPool.getCorePoolSize();
// See if we need up the pool size only if haven't reached the
// maximum limit yet.
if (poolSize != limitOnPoolSize) {
// nodes where containers will run at *this* point of time. This is
// *not* the cluster size and doesn't need to be.
int numNodes = allNodes.size();
int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
if (poolSize < idealPoolSize) {
// Bump up the pool size to idealPoolSize+initialPoolSize, the
// later is just a buffer so we are not always increasing the
// pool-size
int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize
+ initialPoolSize);
LOG.info("Setting ContainerLauncher pool size to " + newPoolSize
+ " as number-of-nodes to talk to is " + numNodes);
launcherPool.setCorePoolSize(newPoolSize);
}
}
// the events from the queue are handled in parallel
// using a thread pool
launcherPool.execute(createEventProcessor(event));
// TODO: Group launching of multiple containers to a single
// NodeManager into a single connection
}
}
};
eventHandlingThread.setName("ContainerLauncher Event Handler");
eventHandlingThread.start();
super.serviceStart();
}
private void shutdownAllContainers() {
for (Container ct : this.containers.values()) {
if (ct != null) {
ct.kill();
}
}
}
protected void serviceStop() throws Exception {
if (stopped.getAndSet(true)) {
// return if already stopped
return;
}
// shutdown any containers that might be left running
shutdownAllContainers();
if (eventHandlingThread != null) {
eventHandlingThread.interrupt();
}
if (launcherPool != null) {
launcherPool.shutdownNow();
}
super.serviceStop();
}
protected EventProcessor createEventProcessor(ContainerLauncherEvent event) {
return new EventProcessor(event);
}
/**
* Setup and start the container on remote nodemanager.
*/
| Container |
java | spring-projects__spring-security | cas/src/main/java/org/springframework/security/cas/jackson2/CasAuthenticationTokenMixin.java | {
"start": 1339,
"end": 2554
} | class ____ helps in deserialize
* {@link org.springframework.security.cas.authentication.CasAuthenticationToken} using
* jackson. Two more dependent classes needs to register along with this mixin class.
* <ol>
* <li>{@link org.springframework.security.cas.jackson2.AssertionImplMixin}</li>
* <li>{@link org.springframework.security.cas.jackson2.AttributePrincipalImplMixin}</li>
* </ol>
*
* <p>
* <pre>
* ObjectMapper mapper = new ObjectMapper();
* mapper.registerModule(new CasJackson2Module());
* </pre>
*
* @author Jitendra Singh
* @since 4.2
* @see CasJackson2Module
* @see org.springframework.security.jackson2.SecurityJackson2Modules
* @deprecated as of 7.0 in favor of
* {@code org.springframework.security.cas.jackson.CasAuthenticationTokenMixin} based on
* Jackson 3
*/
@SuppressWarnings("removal")
@Deprecated(forRemoval = true)
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY)
@JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.ANY, isGetterVisibility = JsonAutoDetect.Visibility.NONE,
getterVisibility = JsonAutoDetect.Visibility.NONE, creatorVisibility = JsonAutoDetect.Visibility.ANY)
@JsonIgnoreProperties(ignoreUnknown = true)
| which |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/util/ASTHelpers.java | {
"start": 33349,
"end": 35257
} | class ____ of the annotation (e.g.
* "org.jspecify.annotations.Nullable", or "some.package.OuterClassName$InnerClassName")
* @return true if the symbol is annotated with given type.
*/
public static boolean hasAnnotation(Symbol sym, String annotationClass, VisitorState state) {
if (sym == null) {
return false;
}
// TODO(amalloy): unify with hasAnnotation(Symbol, Name, VisitorState)
// normalize to non-binary names
annotationClass = annotationClass.replace('$', '.');
Name annotationName = state.getName(annotationClass);
if (hasAttribute(sym, annotationName)) {
return true;
}
if (sym instanceof ClassSymbol cs && isInherited(state, annotationClass)) {
for (sym = cs.getSuperclass().tsym;
sym instanceof ClassSymbol cs2;
sym = cs2.getSuperclass().tsym) {
if (hasAttribute(sym, annotationName)) {
return true;
}
}
}
return false;
}
/**
* Check for the presence of an annotation, considering annotation inheritance.
*
* @return true if the symbol is annotated with given type.
* @deprecated prefer {@link #hasAnnotation(Symbol, String, VisitorState)} to avoid needing a
* runtime dependency on the annotation class, and to prevent issues if there is skew between
* the definition of the annotation on the runtime and compile-time classpaths
*/
@InlineMe(
replacement = "ASTHelpers.hasAnnotation(sym, annotationClass.getName(), state)",
imports = {"com.google.errorprone.util.ASTHelpers"})
@Deprecated
public static boolean hasAnnotation(
Symbol sym, Class<? extends Annotation> annotationClass, VisitorState state) {
return hasAnnotation(sym, annotationClass.getName(), state);
}
/**
* Check for the presence of an annotation, considering annotation inheritance.
*
* @param annotationClass the binary | name |
java | apache__camel | components/camel-jpa/src/test/java/org/apache/camel/component/jpa/JpaUseMergeTest.java | {
"start": 1246,
"end": 3598
} | class ____ extends AbstractJpaMethodTest {
@Override
public boolean usePersist() {
return false;
}
@Test
public void produceExistingEntity() throws Exception {
setUp("jpa://" + Customer.class.getName() + "?usePersist=false");
final Customer customer = createDefaultCustomer();
transactionTemplate.execute(new TransactionCallback<Object>() {
public Object doInTransaction(TransactionStatus status) {
entityManager.joinTransaction();
entityManager.persist(customer);
entityManager.flush();
return null;
}
});
assertEntitiesInDatabase(1, Customer.class.getName());
assertEntitiesInDatabase(1, Address.class.getName());
// do detach the persisted entity first before modifying it as we intend to merge it later on below
entityManager.detach(customer);
customer.setName("Max Mustermann");
customer.getAddress().setAddressLine1("Musterstr. 1");
customer.getAddress().setAddressLine2("11111 Enterhausen");
Customer receivedCustomer = template.requestBody(endpoint, customer, Customer.class);
assertEquals(customer.getName(), receivedCustomer.getName());
assertNotNull(receivedCustomer.getId());
assertEquals(customer.getAddress().getAddressLine1(), receivedCustomer.getAddress().getAddressLine1());
assertEquals(customer.getAddress().getAddressLine2(), receivedCustomer.getAddress().getAddressLine2());
assertNotNull(receivedCustomer.getAddress().getId());
List<?> results = entityManager.createQuery("select o from " + Customer.class.getName() + " o").getResultList();
assertEquals(1, results.size());
Customer persistedCustomer = (Customer) results.get(0);
assertEquals(receivedCustomer.getName(), persistedCustomer.getName());
assertEquals(receivedCustomer.getId(), persistedCustomer.getId());
assertEquals(receivedCustomer.getAddress().getAddressLine1(), persistedCustomer.getAddress().getAddressLine1());
assertEquals(receivedCustomer.getAddress().getAddressLine2(), persistedCustomer.getAddress().getAddressLine2());
assertEquals(receivedCustomer.getAddress().getId(), persistedCustomer.getAddress().getId());
}
}
| JpaUseMergeTest |
java | apache__camel | components/camel-telegram/src/main/java/org/apache/camel/component/telegram/TelegramService.java | {
"start": 1136,
"end": 1407
} | interface ____ {
UpdateResult getUpdates(Long offset, Integer limit, Integer timeoutSeconds);
void sendMessage(Exchange exchange, AsyncCallback callback, OutgoingMessage message);
boolean setWebhook(String url);
boolean removeWebhook();
}
| TelegramService |
java | greenrobot__EventBus | EventBusPerformance/src/org/greenrobot/eventbusperf/testsubject/PerfTestOtto.java | {
"start": 3261,
"end": 3828
} | class ____ extends PerfTestOtto {
public RegisterAll(Context context, TestParams params) {
super(context, params);
}
public void runTest() {
super.registerUnregisterOneSubscribers();
long timeNanos = super.registerSubscribers();
primaryResultMicros = timeNanos / 1000;
primaryResultCount = params.getSubscriberCount();
}
@Override
public String getDisplayName() {
return "Otto Register, no unregister";
}
}
public static | RegisterAll |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/issues/AdviceWithWeaveByToUriPollTest.java | {
"start": 1117,
"end": 2875
} | class ____ extends ContextTestSupport {
@Test
public void testAdvicePollToString() throws Exception {
RouteDefinition route = context.getRouteDefinitions().get(0);
AdviceWith.adviceWith(route, context, new AdviceWithRouteBuilder() {
@Override
public void configure() throws Exception {
weaveByToString("poll*").replace().to("mock:foo");
mockEndpointsAndSkip("direct:foo*");
}
});
getMockEndpoint("mock:result").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello World");
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Test
public void testAdvicePollToUri() throws Exception {
RouteDefinition route = context.getRouteDefinitions().get(0);
AdviceWith.adviceWith(route, context, new AdviceWithRouteBuilder() {
@Override
public void configure() throws Exception {
weaveByToUri("seda:bar").replace().to("mock:foo");
mockEndpointsAndSkip("direct:foo*");
}
});
getMockEndpoint("mock:result").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello World");
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.poll("seda:bar").to("mock:result");
}
};
}
}
| AdviceWithWeaveByToUriPollTest |
java | google__guava | android/guava-tests/test/com/google/common/collect/SpecialRandom.java | {
"start": 725,
"end": 857
} | class ____ being able to seed a {@link Random} value with a passed in seed from a
* benchmark parameter.
*
* <p>TODO: Remove this | for |
java | apache__maven | api/maven-api-core/src/main/java/org/apache/maven/api/JavaPathType.java | {
"start": 4251,
"end": 7190
} | class ____. Compatibility can
* be achieved, for example, by repeating in the {@code META-INF/services/} directory the services
* that are declared in the {@code module-info.class} file. In that case, the path type can be chosen
* by the plugin.</li>
* <li>If a {@link #patchModule(String)} is also set and the main JAR file is placed on the module path,
* then the test dependency will be placed on the Java {@code --patch-module} option instead of the
* {@code --module-path} option.</li>
* </ul>
*/
MODULES(StandardLocation.MODULE_PATH, "--module-path"),
/**
* The path identified by the Java {@code --upgrade-module-path} option.
* The Java tools location is {@link StandardLocation#UPGRADE_MODULE_PATH}.
*/
UPGRADE_MODULES(StandardLocation.UPGRADE_MODULE_PATH, "--upgrade-module-path"),
/**
* The path identified by the Java {@code --patch-module} option.
* The Java tools location is {@link StandardLocation#PATCH_MODULE_PATH}.
*
* Note that this option is incomplete, because it must be followed by a module name.
* Use this type only when the module to patch is unknown.
*
* @see #patchModule(String)
*/
PATCH_MODULE(StandardLocation.PATCH_MODULE_PATH, "--patch-module"),
/**
* The path identified by the Java {@code --processor-path} option.
* The Java tools location is {@link StandardLocation#ANNOTATION_PROCESSOR_PATH}.
*/
PROCESSOR_CLASSES(StandardLocation.ANNOTATION_PROCESSOR_PATH, "--processor-path"),
/**
* The path identified by the Java {@code --processor-module-path} option.
* The Java tools location is {@link StandardLocation#ANNOTATION_PROCESSOR_MODULE_PATH}.
*/
PROCESSOR_MODULES(StandardLocation.ANNOTATION_PROCESSOR_MODULE_PATH, "--processor-module-path"),
/**
* The path identified by the Java {@code -agentpath} option.
*/
AGENT(null, "-agentpath"),
/**
* The path identified by the Javadoc {@code -doclet} option.
* The Java tools location is {@link DocumentationTool.Location#DOCLET_PATH}.
*/
DOCLET(DocumentationTool.Location.DOCLET_PATH, "-doclet"),
/**
* The path identified by the Javadoc {@code -tagletpath} option.
* The Java tools location is {@link DocumentationTool.Location#TAGLET_PATH}.
*/
TAGLETS(DocumentationTool.Location.TAGLET_PATH, "-tagletpath");
/**
* Creates a path identified by the Java {@code --patch-module} option.
* Contrarily to the other types of paths, this path is applied to only
* one specific module. Used for compilation and execution among others.
*
* <h4>Context-sensitive interpretation</h4>
* This path type makes sense only when a main module is added on the module path by another dependency.
* In no main module is found, the patch dependency may be added on the | path |
java | spring-projects__spring-framework | spring-core/src/jmh/java/org/springframework/core/codec/StringDecoderBenchmark.java | {
"start": 1700,
"end": 1986
} | class ____ {
@Benchmark
public void parseSseLines(SseLinesState state, Blackhole blackhole) {
blackhole.consume(state.parseLines().blockLast());
}
@State(Scope.Benchmark)
@SuppressWarnings({"NotNullFieldNotInitialized", "ConstantConditions"})
public static | StringDecoderBenchmark |
java | apache__rocketmq | broker/src/test/java/org/apache/rocketmq/broker/processor/PopMessageProcessorTest.java | {
"start": 3066,
"end": 13254
} | class ____ {
private PopMessageProcessor popMessageProcessor;
@Spy
private BrokerController brokerController = new BrokerController(new BrokerConfig(), new NettyServerConfig(), new NettyClientConfig(), new MessageStoreConfig());
@Mock
private ChannelHandlerContext handlerContext;
private final EmbeddedChannel embeddedChannel = new EmbeddedChannel();
@Mock
private DefaultMessageStore messageStore;
private ClientChannelInfo clientChannelInfo;
private String group = "FooBarGroup";
private String topic = "FooBar";
@Before
public void init() {
brokerController.setMessageStore(messageStore);
brokerController.getBrokerConfig().setEnablePopBufferMerge(true);
// Initialize BrokerMetricsManager to prevent NPE in tests
brokerController.setBrokerMetricsManager(new BrokerMetricsManager(brokerController));
popMessageProcessor = new PopMessageProcessor(brokerController);
when(handlerContext.channel()).thenReturn(embeddedChannel);
brokerController.getTopicConfigManager().getTopicConfigTable().put(topic, new TopicConfig(topic));
clientChannelInfo = new ClientChannelInfo(embeddedChannel);
ConsumerData consumerData = createConsumerData(group, topic);
brokerController.getConsumerManager().registerConsumer(
consumerData.getGroupName(),
clientChannelInfo,
consumerData.getConsumeType(),
consumerData.getMessageModel(),
consumerData.getConsumeFromWhere(),
consumerData.getSubscriptionDataSet(),
false);
}
@Test
public void testProcessRequest_TopicNotExist() throws RemotingCommandException {
when(messageStore.getMessageStoreConfig()).thenReturn(new MessageStoreConfig());
brokerController.getTopicConfigManager().getTopicConfigTable().remove(topic);
final RemotingCommand request = createPopMsgCommand();
RemotingCommand response = popMessageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.TOPIC_NOT_EXIST);
assertThat(response.getRemark()).contains("topic[" + topic + "] not exist");
}
@Test
public void testProcessRequest_Found() throws RemotingCommandException, InterruptedException {
GetMessageResult getMessageResult = createGetMessageResult(1);
when(messageStore.getMessageStoreConfig()).thenReturn(new MessageStoreConfig());
when(messageStore.getMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyInt(), any())).thenReturn(CompletableFuture.completedFuture(getMessageResult));
final RemotingCommand request = createPopMsgCommand();
popMessageProcessor.processRequest(handlerContext, request);
RemotingCommand response = embeddedChannel.readOutbound();
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
@Test
public void testProcessRequest_MsgWasRemoving() throws RemotingCommandException {
GetMessageResult getMessageResult = createGetMessageResult(1);
getMessageResult.setStatus(GetMessageStatus.MESSAGE_WAS_REMOVING);
when(messageStore.getMessageStoreConfig()).thenReturn(new MessageStoreConfig());
when(messageStore.getMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyInt(), any())).thenReturn(CompletableFuture.completedFuture(getMessageResult));
final RemotingCommand request = createPopMsgCommand();
popMessageProcessor.processRequest(handlerContext, request);
RemotingCommand response = embeddedChannel.readOutbound();
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
@Test
public void testProcessRequest_NoMsgInQueue() throws RemotingCommandException {
GetMessageResult getMessageResult = createGetMessageResult(0);
getMessageResult.setStatus(GetMessageStatus.NO_MESSAGE_IN_QUEUE);
when(messageStore.getMessageStoreConfig()).thenReturn(new MessageStoreConfig());
when(messageStore.getMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyInt(), any())).thenReturn(CompletableFuture.completedFuture(getMessageResult));
final RemotingCommand request = createPopMsgCommand();
RemotingCommand response = popMessageProcessor.processRequest(handlerContext, request);
assertThat(response).isNull();
}
@Test
public void testProcessRequest_whenTimerWheelIsFalse() throws RemotingCommandException {
MessageStoreConfig messageStoreConfig = new MessageStoreConfig();
messageStoreConfig.setTimerWheelEnable(false);
when(messageStore.getMessageStoreConfig()).thenReturn(messageStoreConfig);
final RemotingCommand request = createPopMsgCommand();
RemotingCommand response = popMessageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);
assertThat(response.getRemark()).contains("pop message is forbidden because timerWheelEnable is false");
}
@Test
public void testGetInitOffset_retryTopic() throws RemotingCommandException {
when(messageStore.getMessageStoreConfig()).thenReturn(new MessageStoreConfig());
String newGroup = group + "-" + System.currentTimeMillis();
String retryTopic = KeyBuilder.buildPopRetryTopic(topic, newGroup);
long minOffset = 100L;
when(messageStore.getMinOffsetInQueue(retryTopic, 0)).thenReturn(minOffset);
brokerController.getTopicConfigManager().getTopicConfigTable().put(retryTopic, new TopicConfig(retryTopic, 1, 1));
GetMessageResult getMessageResult = createGetMessageResult(0);
when(messageStore.getMessageAsync(eq(newGroup), anyString(), anyInt(), anyLong(), anyInt(), any()))
.thenReturn(CompletableFuture.completedFuture(getMessageResult));
long offset = brokerController.getConsumerOffsetManager().queryOffset(newGroup, retryTopic, 0);
Assert.assertEquals(-1, offset);
RemotingCommand request = createPopMsgCommand(newGroup, topic, 0, ConsumeInitMode.MAX);
popMessageProcessor.processRequest(handlerContext, request);
offset = brokerController.getConsumerOffsetManager().queryOffset(newGroup, retryTopic, 0);
Assert.assertEquals(minOffset, offset);
when(messageStore.getMinOffsetInQueue(retryTopic, 0)).thenReturn(minOffset * 2);
popMessageProcessor.processRequest(handlerContext, request);
offset = brokerController.getConsumerOffsetManager().queryOffset(newGroup, retryTopic, 0);
Assert.assertEquals(minOffset, offset); // will not entry getInitOffset() again
messageStore.getMinOffsetInQueue(retryTopic, 0); // prevent UnnecessaryStubbingException
}
@Test
public void testGetInitOffset_normalTopic() throws RemotingCommandException, ConsumeQueueException {
long maxOffset = 999L;
when(messageStore.getMessageStoreConfig()).thenReturn(new MessageStoreConfig());
when(messageStore.getMaxOffsetInQueue(topic, 0)).thenReturn(maxOffset);
String newGroup = group + "-" + System.currentTimeMillis();
GetMessageResult getMessageResult = createGetMessageResult(0);
when(messageStore.getMessageAsync(eq(newGroup), anyString(), anyInt(), anyLong(), anyInt(), any()))
.thenReturn(CompletableFuture.completedFuture(getMessageResult));
long offset = brokerController.getConsumerOffsetManager().queryOffset(newGroup, topic, 0);
Assert.assertEquals(-1, offset);
RemotingCommand request = createPopMsgCommand(newGroup, topic, 0, ConsumeInitMode.MAX);
popMessageProcessor.processRequest(handlerContext, request);
offset = brokerController.getConsumerOffsetManager().queryOffset(newGroup, topic, 0);
Assert.assertEquals(maxOffset - 1, offset); // checkInMem return false
when(messageStore.getMaxOffsetInQueue(topic, 0)).thenReturn(maxOffset * 2);
popMessageProcessor.processRequest(handlerContext, request);
offset = brokerController.getConsumerOffsetManager().queryOffset(newGroup, topic, 0);
Assert.assertEquals(maxOffset - 1, offset); // will not entry getInitOffset() again
messageStore.getMaxOffsetInQueue(topic, 0); // prevent UnnecessaryStubbingException
}
private RemotingCommand createPopMsgCommand() {
return createPopMsgCommand(group, topic, -1, ConsumeInitMode.MAX);
}
private RemotingCommand createPopMsgCommand(String group, String topic, int queueId, int initMode) {
PopMessageRequestHeader requestHeader = new PopMessageRequestHeader();
requestHeader.setConsumerGroup(group);
requestHeader.setMaxMsgNums(30);
requestHeader.setQueueId(queueId);
requestHeader.setTopic(topic);
requestHeader.setInvisibleTime(10_000);
requestHeader.setInitMode(initMode);
requestHeader.setOrder(false);
requestHeader.setPollTime(15_000);
requestHeader.setBornTime(System.currentTimeMillis());
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.POP_MESSAGE, requestHeader);
request.makeCustomHeaderToNet();
return request;
}
private GetMessageResult createGetMessageResult(int msgCnt) {
GetMessageResult getMessageResult = new GetMessageResult();
getMessageResult.setStatus(GetMessageStatus.FOUND);
getMessageResult.setMinOffset(100);
getMessageResult.setMaxOffset(1024);
getMessageResult.setNextBeginOffset(516);
for (int i = 0; i < msgCnt; i++) {
ByteBuffer bb = ByteBuffer.allocate(64);
bb.putLong(MessageDecoder.MESSAGE_STORE_TIMESTAMP_POSITION, System.currentTimeMillis());
getMessageResult.addMessage(new SelectMappedBufferResult(200, bb, 64, new DefaultMappedFile()));
}
return getMessageResult;
}
}
| PopMessageProcessorTest |
java | resilience4j__resilience4j | resilience4j-micronaut/src/main/java/io/github/resilience4j/micronaut/retry/RetryRegistryFactory.java | {
"start": 1934,
"end": 6464
} | class ____ {
@Bean
@RetryQualifier
public CompositeCustomizer<RetryConfigCustomizer> compositeTimeLimiterCustomizer(@Nullable List<RetryConfigCustomizer> configCustomizers) {
return new CompositeCustomizer<>(configCustomizers);
}
@Singleton
@Requires(beans = CommonRetryConfigurationProperties.class)
public RetryRegistry createRetryRegistry(
CommonRetryConfigurationProperties retryConfigurationProperties,
@RetryQualifier EventConsumerRegistry<RetryEvent> retryEventConsumerRegistry,
@RetryQualifier RegistryEventConsumer<Retry> retryRegistryEventConsumer,
@RetryQualifier CompositeCustomizer<RetryConfigCustomizer> compositeRetryCustomizer) {
RetryRegistry retryRegistry = createRetryRegistry(retryConfigurationProperties,
retryRegistryEventConsumer, compositeRetryCustomizer);
registerEventConsumer(retryRegistry, retryEventConsumerRegistry,
retryConfigurationProperties);
retryConfigurationProperties.getInstances()
.forEach((name, properties) ->
retryRegistry.retry(name, retryConfigurationProperties
.createRetryConfig(name, compositeRetryCustomizer)));
return retryRegistry;
}
@Bean
@RetryQualifier
public EventConsumerRegistry<RetryEvent> retryEventEventConsumerRegistry() {
return new DefaultEventConsumerRegistry<>();
}
@Bean
@Primary
@RetryQualifier
public RegistryEventConsumer<Retry> retryRegistryEventConsumer(
Optional<List<RegistryEventConsumer<Retry>>> optionalRegistryEventConsumers
) {
return new CompositeRegistryEventConsumer<>(
optionalRegistryEventConsumers.orElseGet(ArrayList::new)
);
}
/**
* Registers the post creation consumer function that registers the consumer events to the rate
* limiters.
*
* @param retryRegistry The rate limiter registry.
* @param eventConsumerRegistry The event consumer registry.
*/
private void registerEventConsumer(RetryRegistry retryRegistry,
EventConsumerRegistry<RetryEvent> eventConsumerRegistry,
CommonRetryConfigurationProperties rateLimiterConfigurationProperties) {
retryRegistry.getEventPublisher()
.onEntryAdded(event -> registerEventConsumer(eventConsumerRegistry, event.getAddedEntry(), rateLimiterConfigurationProperties))
.onEntryReplaced(event -> registerEventConsumer(eventConsumerRegistry, event.getNewEntry(), rateLimiterConfigurationProperties))
.onEntryRemoved(event -> unregisterEventConsumer(eventConsumerRegistry, event.getRemovedEntry()));
}
private void unregisterEventConsumer(EventConsumerRegistry<RetryEvent> eventConsumerRegistry, Retry retry) {
eventConsumerRegistry.removeEventConsumer(retry.getName());
}
private void registerEventConsumer(
EventConsumerRegistry<RetryEvent> eventConsumerRegistry, Retry retry,
CommonRetryConfigurationProperties retryConfigurationProperties) {
int eventConsumerBufferSize = Optional.ofNullable(retryConfigurationProperties.getBackendProperties(retry.getName()))
.map(CommonRetryConfigurationProperties.InstanceProperties::getEventConsumerBufferSize)
.orElse(100);
retry.getEventPublisher().onEvent(eventConsumerRegistry.createEventConsumer(retry.getName(), eventConsumerBufferSize));
}
/**
* Initializes a rate limiter registry.
*
* @param retryProperties The rate limiter configuration properties.
* @param compositeRateLimiterCustomizer the composite rate limiter customizer delegate
* @return a RateLimiterRegistry
*/
private RetryRegistry createRetryRegistry(
CommonRetryConfigurationProperties retryProperties,
RegistryEventConsumer<Retry> rateLimiterRegistryEventConsumer,
CompositeCustomizer<RetryConfigCustomizer> compositeRateLimiterCustomizer) {
Map<String, RetryConfig> configs = retryProperties.getConfigs()
.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey,
entry -> retryProperties
.createRetryConfig(entry.getValue(), compositeRateLimiterCustomizer,
entry.getKey())));
return RetryRegistry.of(configs, rateLimiterRegistryEventConsumer, retryProperties.getTags());
}
}
| RetryRegistryFactory |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/PatternMatchingInstanceofTest.java | {
"start": 4861,
"end": 5189
} | class ____ {
void test(Object o) {
if (!(o instanceof Test)) {
return;
}
Test test = (Test) o;
test(test);
}
}
""")
.addOutputLines(
"Test.java",
"""
| Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/idgen/foreign/ForeignGeneratorResourceLocalTest.java | {
"start": 5028,
"end": 5258
} | class ____ {
@Id
@GeneratedValue
private Long id;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
}
@Entity(name = "Customer")
@Table(name = "CUSTOMER")
public static | Contract |
java | grpc__grpc-java | services/src/main/java/io/grpc/protobuf/services/BinaryLogSink.java | {
"start": 745,
"end": 862
} | class ____ accepts binary log messages.
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/4017")
public | that |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionalOnMissingFilterBeanTests.java | {
"start": 4735,
"end": 4962
} | class ____ {
@Bean
FilterRegistrationBean<OtherFilter> myOtherFilter() {
return new FilterRegistrationBean<>(new OtherFilter());
}
}
@Configuration(proxyBeanMethods = false)
static | WithoutTestFilterRegistrationConfig |
java | netty__netty | codec-stomp/src/main/java/io/netty/handler/codec/stomp/DefaultStompFrame.java | {
"start": 881,
"end": 2906
} | class ____ extends DefaultStompHeadersSubframe implements StompFrame {
private final ByteBuf content;
public DefaultStompFrame(StompCommand command) {
this(command, Unpooled.buffer(0));
}
public DefaultStompFrame(StompCommand command, ByteBuf content) {
this(command, content, null);
}
DefaultStompFrame(StompCommand command, ByteBuf content, DefaultStompHeaders headers) {
super(command, headers);
this.content = ObjectUtil.checkNotNull(content, "content");
}
@Override
public ByteBuf content() {
return content;
}
@Override
public StompFrame copy() {
return replace(content.copy());
}
@Override
public StompFrame duplicate() {
return replace(content.duplicate());
}
@Override
public StompFrame retainedDuplicate() {
return replace(content.retainedDuplicate());
}
@Override
public StompFrame replace(ByteBuf content) {
return new DefaultStompFrame(command, content, headers.copy());
}
@Override
public int refCnt() {
return content.refCnt();
}
@Override
public StompFrame retain() {
content.retain();
return this;
}
@Override
public StompFrame retain(int increment) {
content.retain(increment);
return this;
}
@Override
public StompFrame touch() {
content.touch();
return this;
}
@Override
public StompFrame touch(Object hint) {
content.touch(hint);
return this;
}
@Override
public boolean release() {
return content.release();
}
@Override
public boolean release(int decrement) {
return content.release(decrement);
}
@Override
public String toString() {
return "DefaultStompFrame{" +
"command=" + command +
", headers=" + headers +
", content=" + content.toString(CharsetUtil.UTF_8) +
'}';
}
}
| DefaultStompFrame |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/sql_load_script/ImportSqlLoadScriptTestCase.java | {
"start": 300,
"end": 917
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyEntity.class, SqlLoadScriptTestResource.class)
.addAsResource("application-import-load-script-test.properties", "application.properties")
.addAsResource("import.sql"));
@Test
public void testImportSqlLoadScriptTest() {
String name = "import.sql load script entity";
RestAssured.when().get("/orm-sql-load-script/2").then().body(Matchers.is(name));
}
}
| ImportSqlLoadScriptTestCase |
java | apache__camel | core/camel-core-engine/src/main/java/org/apache/camel/impl/DefaultDumpRoutesStrategy.java | {
"start": 2629,
"end": 32013
} | class ____ extends ServiceSupport implements DumpRoutesStrategy, CamelContextAware {
private static final Logger LOG = LoggerFactory.getLogger(DefaultDumpRoutesStrategy.class);
private static final String DIVIDER = "--------------------------------------------------------------------------------";
private final AtomicInteger counter = new AtomicInteger();
private CamelContext camelContext;
private String include = "routes";
private boolean resolvePlaceholders = true;
private boolean uriAsParameters;
private boolean generatedIds = true;
private boolean log = true;
private String output;
private String outputFileName;
@Override
public CamelContext getCamelContext() {
return camelContext;
}
@Override
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
public String getInclude() {
return include;
}
public void setInclude(String include) {
this.include = include;
}
public boolean isResolvePlaceholders() {
return resolvePlaceholders;
}
public void setResolvePlaceholders(boolean resolvePlaceholders) {
this.resolvePlaceholders = resolvePlaceholders;
}
public boolean isGeneratedIds() {
return generatedIds;
}
public void setGeneratedIds(boolean generatedIds) {
this.generatedIds = generatedIds;
}
public boolean isLog() {
return log;
}
public void setLog(boolean log) {
this.log = log;
}
public String getOutput() {
return output;
}
public void setOutput(String output) {
String name = FileUtil.stripPath(output);
if (name != null && name.contains(".")) {
outputFileName = name;
this.output = FileUtil.onlyPath(output);
if (this.output == null || this.output.isEmpty()) {
this.output = ".";
}
} else {
this.output = output;
}
}
public boolean isUriAsParameters() {
return uriAsParameters;
}
public void setUriAsParameters(boolean uriAsParameters) {
this.uriAsParameters = uriAsParameters;
}
@Override
public void dumpRoutes(String format) {
if ("yaml".equalsIgnoreCase(format)) {
doDumpRoutesAsYaml(camelContext);
} else if ("xml".equalsIgnoreCase(format)) {
doDumpRoutesAsXml(camelContext);
}
}
protected void doDumpRoutesAsYaml(CamelContext camelContext) {
final ModelToYAMLDumper dumper = PluginHelper.getModelToYAMLDumper(camelContext);
final Model model = camelContext.getCamelContextExtension().getContextPlugin(Model.class);
final DummyResource dummy = new DummyResource(null, null);
final Set<String> files = new HashSet<>();
if (include.contains("*") || include.contains("all") || include.contains("beans")) {
int size = model.getCustomBeans().size();
if (size > 0) {
Map<Resource, List<BeanFactoryDefinition>> groups = new LinkedHashMap<>();
for (BeanFactoryDefinition bean : model.getCustomBeans()) {
Resource res = bean.getResource();
if (res == null) {
res = dummy;
}
List<BeanFactoryDefinition> beans = groups.computeIfAbsent(res, resource -> new ArrayList<>());
beans.add(bean);
}
StringBuilder sbLog = new StringBuilder();
for (Map.Entry<Resource, List<BeanFactoryDefinition>> entry : groups.entrySet()) {
List<BeanFactoryDefinition> beans = entry.getValue();
Resource resource = entry.getKey();
StringBuilder sbLocal = new StringBuilder();
doDumpYamlBeans(camelContext, beans, resource == dummy ? null : resource, dumper, "beans", sbLocal, sbLog);
// dump each resource into its own file
doDumpToDirectory(resource, sbLocal, "beans", "yaml", files);
}
if (!sbLog.isEmpty() && log) {
LOG.info("Dumping {} beans as YAML", size);
LOG.info("{}", sbLog);
}
}
}
if (include.contains("*") || include.contains("all") || include.contains("dataFormats")) {
int size = model.getDataFormats().size();
if (size > 0) {
Map<Resource, Map<String, DataFormatDefinition>> groups = new LinkedHashMap<>();
for (Map.Entry<String, DataFormatDefinition> entry : model.getDataFormats().entrySet()) {
Resource res = entry.getValue().getResource();
if (res == null) {
res = dummy;
}
Map<String, DataFormatDefinition> dfs = groups.computeIfAbsent(res, resource -> new LinkedHashMap<>());
dfs.put(entry.getKey(), entry.getValue());
}
StringBuilder sbLog = new StringBuilder();
for (Map.Entry<Resource, Map<String, DataFormatDefinition>> entry : groups.entrySet()) {
Map<String, DataFormatDefinition> dfs = entry.getValue();
Resource resource = entry.getKey();
StringBuilder sbLocal = new StringBuilder();
doDumpYamlDataFormats(camelContext, dfs, resource == dummy ? null : resource, dumper, "dataFormats",
sbLocal, sbLog);
// dump each resource into its own file
doDumpToDirectory(resource, sbLocal, "dataFormats", "yaml", files);
}
if (!sbLog.isEmpty() && log) {
LOG.info("Dumping {} data formats as YAML", size);
LOG.info("{}", sbLog);
}
}
}
if (include.contains("*") || include.contains("all") || include.contains("rests")) {
int size = model.getRestDefinitions().size();
if (size > 0) {
Map<Resource, RestsDefinition> groups = new LinkedHashMap<>();
for (RestDefinition rest : model.getRestDefinitions()) {
Resource res = rest.getResource();
if (res == null) {
res = dummy;
}
RestsDefinition rests = groups.computeIfAbsent(res, resource -> new RestsDefinition());
rests.getRests().add(rest);
}
StringBuilder sbLog = new StringBuilder();
for (Map.Entry<Resource, RestsDefinition> entry : groups.entrySet()) {
RestsDefinition def = entry.getValue();
Resource resource = entry.getKey();
StringBuilder sbLocal = new StringBuilder();
doDumpYaml(camelContext, def, resource == dummy ? null : resource, dumper, "rests", sbLocal, sbLog);
// dump each resource into its own file
doDumpToDirectory(resource, sbLocal, "rests", "yaml", files);
}
if (!sbLog.isEmpty() && log) {
LOG.info("Dumping {} rests as YAML", size);
LOG.info("{}", sbLog);
}
}
}
if (include.contains("*") || include.contains("all") || include.contains("routeConfigurations")
|| include.contains("route-configurations")) {
int size = model.getRouteConfigurationDefinitions().size();
if (size > 0) {
Map<Resource, RouteConfigurationsDefinition> groups = new LinkedHashMap<>();
for (RouteConfigurationDefinition config : model.getRouteConfigurationDefinitions()) {
Resource res = config.getResource();
if (res == null) {
res = dummy;
}
RouteConfigurationsDefinition routes
= groups.computeIfAbsent(res, resource -> new RouteConfigurationsDefinition());
routes.getRouteConfigurations().add(config);
}
StringBuilder sbLog = new StringBuilder();
for (Map.Entry<Resource, RouteConfigurationsDefinition> entry : groups.entrySet()) {
RouteConfigurationsDefinition def = entry.getValue();
Resource resource = entry.getKey();
StringBuilder sbLocal = new StringBuilder();
doDumpYaml(camelContext, def, resource == dummy ? null : resource, dumper, "route-configurations", sbLocal,
sbLog);
// dump each resource into its own file
doDumpToDirectory(resource, sbLocal, "route-configurations", "yaml", files);
}
if (!sbLog.isEmpty() && log) {
LOG.info("Dumping {} route-configurations as YAML", size);
LOG.info("{}", sbLog);
}
}
}
if (include.contains("*") || include.contains("all") || include.contains("routeTemplates")
|| include.contains("route-templates")) {
int size = model.getRouteTemplateDefinitions().size();
if (size > 0) {
Map<Resource, RouteTemplatesDefinition> groups = new LinkedHashMap<>();
for (RouteTemplateDefinition rt : model.getRouteTemplateDefinitions()) {
Resource res = rt.getResource();
if (res == null) {
res = dummy;
}
RouteTemplatesDefinition rests = groups.computeIfAbsent(res, resource -> new RouteTemplatesDefinition());
rests.getRouteTemplates().add(rt);
}
StringBuilder sbLog = new StringBuilder();
for (Map.Entry<Resource, RouteTemplatesDefinition> entry : groups.entrySet()) {
RouteTemplatesDefinition def = entry.getValue();
Resource resource = entry.getKey();
StringBuilder sbLocal = new StringBuilder();
doDumpYaml(camelContext, def, resource == dummy ? null : resource, dumper, "route-templates", sbLocal,
sbLog);
// dump each resource into its own file
doDumpToDirectory(resource, sbLocal, "route-templates", "yaml", files);
}
if (!sbLog.isEmpty() && log) {
LOG.info("Dumping {} route-templates as YAML", size);
LOG.info("{}", sbLog);
}
}
}
if (include.contains("*") || include.contains("all") || include.contains("routes")) {
int size = model.getRouteDefinitions().size();
if (size > 0) {
Map<Resource, RoutesDefinition> groups = new LinkedHashMap<>();
for (RouteDefinition route : model.getRouteDefinitions()) {
if ((route.isRest() != null && route.isRest()) || (route.isTemplate() != null && route.isTemplate())) {
// skip routes that are rest/templates
continue;
}
Resource res = route.getResource();
if (res == null) {
res = dummy;
}
RoutesDefinition routes = groups.computeIfAbsent(res, resource -> new RoutesDefinition());
routes.getRoutes().add(route);
}
StringBuilder sbLog = new StringBuilder();
for (Map.Entry<Resource, RoutesDefinition> entry : groups.entrySet()) {
RoutesDefinition def = entry.getValue();
Resource resource = entry.getKey();
StringBuilder sbLocal = new StringBuilder();
doDumpYaml(camelContext, def, resource == dummy ? null : resource, dumper, "routes", sbLocal, sbLog);
// dump each resource into its own file
doDumpToDirectory(resource, sbLocal, "routes", "yaml", files);
}
if (!sbLog.isEmpty() && log) {
LOG.info("Dumping {} routes as YAML", size);
LOG.info("{}", sbLog);
}
}
}
}
/**
 * Dumps the given model node as YAML into {@code sbLocal} and appends a formatted
 * entry to {@code sbLog} for later logging.
 *
 * @param camelContext the camel context used by the dumper
 * @param def          the model node to dump
 * @param resource     the source resource of the node, or null if unknown
 * @param dumper       the YAML dumper to use
 * @param kind         human readable kind (such as "routes") used in the warning message
 * @param sbLocal      receives the YAML dump for this resource
 * @param sbLog        receives the formatted dump used for logging
 */
protected void doDumpYaml(
        CamelContext camelContext, NamedNode def, Resource resource,
        ModelToYAMLDumper dumper, String kind, StringBuilder sbLocal, StringBuilder sbLog) {
    try {
        String dump = dumper.dumpModelAsYaml(camelContext, def, resolvePlaceholders, uriAsParameters, generatedIds, false);
        sbLocal.append(dump);
        appendLogDump(resource, dump, sbLog);
    } catch (Exception e) {
        // dumping is best-effort: failures are only logged, never propagated
        LOG.warn("Error dumping {} to YAML due to {}. This exception is ignored.", kind, e.getMessage(), e);
    }
}
/**
 * Dumps the given beans as YAML into {@code sbLocal} and appends a formatted
 * entry to {@code sbLog} for later logging.
 *
 * @param camelContext the camel context used by the dumper
 * @param beans        the bean definitions to dump (raw type to match the dumper API)
 * @param resource     the source resource of the beans, or null if unknown
 * @param dumper       the YAML dumper to use
 * @param kind         human readable kind (such as "beans") used in the warning message
 * @param sbLocal      receives the YAML dump for this resource
 * @param sbLog        receives the formatted dump used for logging
 */
protected void doDumpYamlBeans(
        CamelContext camelContext, List beans, Resource resource,
        ModelToYAMLDumper dumper, String kind, StringBuilder sbLocal, StringBuilder sbLog) {
    try {
        String dump = dumper.dumpBeansAsYaml(camelContext, beans);
        sbLocal.append(dump);
        appendLogDump(resource, dump, sbLog);
    } catch (Exception e) {
        // dumping is best-effort: failures are only logged, never propagated
        LOG.warn("Error dumping {} to YAML due to {}. This exception is ignored.", kind, e.getMessage(), e);
    }
}
/**
 * Dumps the given data formats as YAML into {@code sbLocal} and appends a formatted
 * entry to {@code sbLog} for later logging.
 *
 * @param camelContext the camel context used by the dumper
 * @param dataFormats  the data format definitions keyed by name (raw type to match the dumper API)
 * @param resource     the source resource of the data formats, or null if unknown
 * @param dumper       the YAML dumper to use
 * @param kind         human readable kind (such as "dataFormats") used in the warning message
 * @param sbLocal      receives the YAML dump for this resource
 * @param sbLog        receives the formatted dump used for logging
 */
protected void doDumpYamlDataFormats(
        CamelContext camelContext, Map dataFormats, Resource resource,
        ModelToYAMLDumper dumper, String kind, StringBuilder sbLocal, StringBuilder sbLog) {
    try {
        String dump = dumper.dumpDataFormatsAsYaml(camelContext, dataFormats);
        sbLocal.append(dump);
        appendLogDump(resource, dump, sbLog);
    } catch (Exception e) {
        // dumping is best-effort: failures are only logged, never propagated
        LOG.warn("Error dumping {} to YAML due to {}. This exception is ignored.", kind, e.getMessage(), e);
    }
}
/**
 * Dumps the given data formats as XML into {@code sbLocal} and appends a formatted
 * entry to {@code sbLog} for later logging.
 *
 * @param camelContext the camel context used by the dumper
 * @param dataFormats  the data format definitions keyed by name (raw type to match the dumper API)
 * @param resource     the source resource of the data formats, or null if unknown
 * @param dumper       the XML dumper to use
 * @param kind         human readable kind (such as "dataFormats") used in the warning message
 * @param sbLocal      receives the XML dump for this resource
 * @param sbLog        receives the formatted dump used for logging
 */
protected void doDumpXmlDataFormats(
        CamelContext camelContext, Map dataFormats, Resource resource,
        ModelToXMLDumper dumper, String kind, StringBuilder sbLocal, StringBuilder sbLog) {
    try {
        String dump = dumper.dumpDataFormatsAsXml(camelContext, dataFormats);
        sbLocal.append(dump);
        appendLogDump(resource, dump, sbLog);
    } catch (Exception e) {
        // dumping is best-effort: failures are only logged, never propagated
        LOG.warn("Error dumping {} to XML due to {}. This exception is ignored.", kind, e.getMessage(), e);
    }
}
/**
 * Dumps the model as XML, grouped per source {@link Resource} so each originating file gets
 * its own dump. Depending on the include filter this covers custom beans, data formats, rests,
 * route configurations, route templates and plain routes. Each group is written to its own
 * file in the output directory (when configured) and optionally logged, and finally every
 * written XML file is wrapped in a single root tag.
 *
 * @param camelContext the camel context whose model is dumped
 */
protected void doDumpRoutesAsXml(CamelContext camelContext) {
final ModelToXMLDumper dumper = PluginHelper.getModelToXMLDumper(camelContext);
final Model model = camelContext.getCamelContextExtension().getContextPlugin(Model.class);
// placeholder used to group model elements that have no known source resource
final DummyResource dummy = new DummyResource(null, null);
// file names written so far; shared across sections so later dumps append to earlier files
final Set<String> files = new HashSet<>();
// custom beans
if (include.contains("*") || include.contains("all") || include.contains("beans")) {
int size = model.getCustomBeans().size();
if (size > 0) {
// group the beans by their source resource (dummy when unknown)
Map<Resource, List<BeanFactoryDefinition>> groups = new LinkedHashMap<>();
for (BeanFactoryDefinition bean : model.getCustomBeans()) {
Resource res = bean.getResource();
if (res == null) {
res = dummy;
}
List<BeanFactoryDefinition> beans = groups.computeIfAbsent(res, resource -> new ArrayList<>());
beans.add(bean);
}
StringBuilder sbLog = new StringBuilder();
for (Map.Entry<Resource, List<BeanFactoryDefinition>> entry : groups.entrySet()) {
List<BeanFactoryDefinition> beans = entry.getValue();
Resource resource = entry.getKey();
StringBuilder sbLocal = new StringBuilder();
doDumpXmlBeans(camelContext, beans, resource == dummy ? null : resource, dumper, "beans", sbLocal, sbLog);
// dump each resource into its own file
doDumpToDirectory(resource, sbLocal, "beans", "xml", files);
}
if (!sbLog.isEmpty() && log) {
LOG.info("Dumping {} beans as XML", size);
LOG.info("{}", sbLog);
}
}
}
// data formats
if (include.contains("*") || include.contains("all") || include.contains("dataFormats")) {
int size = model.getDataFormats().size();
if (size > 0) {
// group the data formats by their source resource (dummy when unknown)
Map<Resource, Map<String, DataFormatDefinition>> groups = new LinkedHashMap<>();
for (Map.Entry<String, DataFormatDefinition> entry : model.getDataFormats().entrySet()) {
Resource res = entry.getValue().getResource();
if (res == null) {
res = dummy;
}
Map<String, DataFormatDefinition> dfs = groups.computeIfAbsent(res, resource -> new LinkedHashMap<>());
dfs.put(entry.getKey(), entry.getValue());
}
StringBuilder sbLog = new StringBuilder();
for (Map.Entry<Resource, Map<String, DataFormatDefinition>> entry : groups.entrySet()) {
Map<String, DataFormatDefinition> dfs = entry.getValue();
Resource resource = entry.getKey();
StringBuilder sbLocal = new StringBuilder();
doDumpXmlDataFormats(camelContext, dfs, resource == dummy ? null : resource, dumper, "dataFormats", sbLocal,
sbLog);
// dump each resource into its own file
doDumpToDirectory(resource, sbLocal, "dataFormats", "xml", files);
}
if (!sbLog.isEmpty() && log) {
LOG.info("Dumping {} data formats as XML", size);
LOG.info("{}", sbLog);
}
}
}
// rest definitions
if (include.contains("*") || include.contains("all") || include.contains("rests")) {
int size = model.getRestDefinitions().size();
if (size > 0) {
// group the rests by their source resource (dummy when unknown)
Map<Resource, RestsDefinition> groups = new LinkedHashMap<>();
for (RestDefinition rest : model.getRestDefinitions()) {
Resource res = rest.getResource();
if (res == null) {
res = dummy;
}
RestsDefinition routes = groups.computeIfAbsent(res, resource -> new RestsDefinition());
routes.getRests().add(rest);
}
StringBuilder sbLog = new StringBuilder();
for (Map.Entry<Resource, RestsDefinition> entry : groups.entrySet()) {
RestsDefinition def = entry.getValue();
Resource resource = entry.getKey();
StringBuilder sbLocal = new StringBuilder();
doDumpXml(camelContext, def, resource == dummy ? null : resource, dumper, "rest", "rests", sbLocal, sbLog);
// dump each resource into its own file
doDumpToDirectory(resource, sbLocal, "rests", "xml", files);
}
if (!sbLog.isEmpty() && log) {
LOG.info("Dumping {} rests as XML", size);
LOG.info("{}", sbLog);
}
}
}
// route configurations (both camelCase and kebab-case spellings accepted in the include filter)
if (include.contains("*") || include.contains("all") || include.contains("routeConfigurations")
|| include.contains("route-configurations")) {
int size = model.getRouteConfigurationDefinitions().size();
if (size > 0) {
// group the route configurations by their source resource (dummy when unknown)
Map<Resource, RouteConfigurationsDefinition> groups = new LinkedHashMap<>();
for (RouteConfigurationDefinition config : model.getRouteConfigurationDefinitions()) {
Resource res = config.getResource();
if (res == null) {
res = dummy;
}
RouteConfigurationsDefinition routes
= groups.computeIfAbsent(res, resource -> new RouteConfigurationsDefinition());
routes.getRouteConfigurations().add(config);
}
StringBuilder sbLog = new StringBuilder();
for (Map.Entry<Resource, RouteConfigurationsDefinition> entry : groups.entrySet()) {
RouteConfigurationsDefinition def = entry.getValue();
Resource resource = entry.getKey();
StringBuilder sbLocal = new StringBuilder();
doDumpXml(camelContext, def, resource == dummy ? null : resource, dumper, "routeConfiguration",
"route-configurations",
sbLocal, sbLog);
// dump each resource into its own file
doDumpToDirectory(resource, sbLocal, "route-configurations", "xml", files);
}
if (!sbLog.isEmpty() && log) {
LOG.info("Dumping {} route-configurations as XML", size);
LOG.info("{}", sbLog);
}
}
}
// route templates (both camelCase and kebab-case spellings accepted in the include filter)
if (include.contains("*") || include.contains("all") || include.contains("routeTemplates")
|| include.contains("route-templates")) {
int size = model.getRouteTemplateDefinitions().size();
if (size > 0) {
// group the route templates by their source resource (dummy when unknown)
Map<Resource, RouteTemplatesDefinition> groups = new LinkedHashMap<>();
for (RouteTemplateDefinition rt : model.getRouteTemplateDefinitions()) {
Resource res = rt.getResource();
if (res == null) {
res = dummy;
}
RouteTemplatesDefinition routes = groups.computeIfAbsent(res, resource -> new RouteTemplatesDefinition());
routes.getRouteTemplates().add(rt);
}
StringBuilder sbLog = new StringBuilder();
for (Map.Entry<Resource, RouteTemplatesDefinition> entry : groups.entrySet()) {
RouteTemplatesDefinition def = entry.getValue();
Resource resource = entry.getKey();
StringBuilder sbLocal = new StringBuilder();
doDumpXml(camelContext, def, resource == dummy ? null : resource, dumper, "routeTemplate",
"route-templates", sbLocal, sbLog);
// dump each resource into its own file
doDumpToDirectory(resource, sbLocal, "route-templates", "xml", files);
}
if (!sbLog.isEmpty() && log) {
LOG.info("Dumping {} route-templates as XML", size);
LOG.info("{}", sbLog);
}
}
}
// plain routes (rest and templated routes are covered by the sections above)
if (include.contains("*") || include.contains("all") || include.contains("routes")) {
int size = model.getRouteDefinitions().size();
if (size > 0) {
// group the routes by their source resource (dummy when unknown)
Map<Resource, RoutesDefinition> groups = new LinkedHashMap<>();
for (RouteDefinition route : model.getRouteDefinitions()) {
if ((route.isRest() != null && route.isRest()) || (route.isTemplate() != null && route.isTemplate())) {
// skip routes that are rest/templates
continue;
}
Resource res = route.getResource();
if (res == null) {
res = dummy;
}
RoutesDefinition routes = groups.computeIfAbsent(res, resource -> new RoutesDefinition());
routes.getRoutes().add(route);
}
StringBuilder sbLog = new StringBuilder();
for (Map.Entry<Resource, RoutesDefinition> entry : groups.entrySet()) {
RoutesDefinition def = entry.getValue();
Resource resource = entry.getKey();
StringBuilder sbLocal = new StringBuilder();
doDumpXml(camelContext, def, resource == dummy ? null : resource, dumper, "route", "routes", sbLocal,
sbLog);
// dump each resource into its own file
doDumpToDirectory(resource, sbLocal, "routes", "xml", files);
}
if (!sbLog.isEmpty() && log) {
LOG.info("Dumping {} routes as XML", size);
LOG.info("{}", sbLog);
}
}
}
if (output != null && !files.isEmpty()) {
// all XML files need to have <camel> as root tag
doAdjustXmlFiles(files);
}
}
/**
 * Dumps the given beans as XML into {@code sbLocal} and appends a formatted
 * entry to {@code sbLog} for later logging.
 *
 * @param camelContext the camel context used by the dumper
 * @param beans        the bean definitions to dump (raw type to match the dumper API)
 * @param resource     the source resource of the beans, or null if unknown
 * @param dumper       the XML dumper to use
 * @param kind         human readable kind (such as "beans") used in the warning message
 * @param sbLocal      receives the XML dump for this resource
 * @param sbLog        receives the formatted dump used for logging
 */
protected void doDumpXmlBeans(
        CamelContext camelContext, List beans, Resource resource,
        ModelToXMLDumper dumper, String kind, StringBuilder sbLocal, StringBuilder sbLog) {
    try {
        String dump = dumper.dumpBeansAsXml(camelContext, beans);
        sbLocal.append(dump);
        appendLogDump(resource, dump, sbLog);
    } catch (Exception e) {
        // dumping is best-effort: failures are only logged, never propagated
        LOG.warn("Error dumping {} to XML due to {}. This exception is ignored.", kind, e.getMessage(), e);
    }
}
/**
 * Dumps the given model node as XML into {@code sbLocal} and appends a formatted entry to
 * {@code sbLog} for later logging. The XML is post-processed to strip the spring namespace
 * and the outer plural wrapper tag (such as &lt;routes&gt;) so dumps from several resources
 * can later be concatenated under one root tag.
 *
 * @param camelContext the camel context used by the dumper
 * @param def          the model node to dump
 * @param resource     the source resource of the node, or null if unknown
 * @param dumper       the XML dumper to use
 * @param replace      the singular element name (such as "route") used during post-processing
 * @param kind         human readable kind (such as "routes") used in the warning message
 * @param sbLocal      receives the XML dump for this resource
 * @param sbLog        receives the formatted dump used for logging
 */
protected void doDumpXml(
        CamelContext camelContext, NamedNode def, Resource resource,
        ModelToXMLDumper dumper, String replace, String kind, StringBuilder sbLocal, StringBuilder sbLog) {
    try {
        String xml = dumper.dumpModelAsXml(camelContext, def, resolvePlaceholders, generatedIds, false);
        // remove spring schema xmlns that camel-jaxb dumper includes
        xml = StringHelper.replaceFirst(xml, " xmlns=\"http://camel.apache.org/schema/spring\">", ">");
        // blank line after each closing element for readability
        xml = xml.replace("</" + replace + ">", "</" + replace + ">\n");
        // remove outer tag (routes, rests, etc)
        replace = replace + "s";
        xml = StringHelper.replaceFirst(xml, "<" + replace + ">", "");
        xml = StringHelper.replaceFirst(xml, "</" + replace + ">", "");
        sbLocal.append(xml);
        appendLogDump(resource, xml, sbLog);
    } catch (Exception e) {
        // dumping is best-effort: failures are only logged, never propagated
        LOG.warn("Error dumping {} to XML due to {}. This exception is ignored.", kind, e.getMessage(), e);
    }
}
/**
 * Writes the dumped content to a file in the configured output directory (if any).
 * The first dump for a given file name overwrites any existing file; subsequent dumps
 * for the same name (other kinds sharing the same resource) are appended.
 *
 * @param resource the source resource used to compute the target file name, or null
 * @param sbLocal  the dumped content to write (nothing is written when empty)
 * @param kind     human readable kind (such as "routes") used for logging and error messages
 * @param ext      the file extension ("xml" or "yaml")
 * @param files    file names written so far, used to decide overwrite vs append; updated here
 * @throws RuntimeException if writing to the target file fails
 */
@SuppressWarnings("ResultOfMethodCallIgnored")
protected void doDumpToDirectory(Resource resource, StringBuilder sbLocal, String kind, String ext, Set<String> files) {
    if (output != null && !sbLocal.isEmpty()) {
        // make sure directory exists
        File dir = new File(output);
        dir.mkdirs();
        String name = resolveFileName(ext, resource);
        // first dump to a given file overwrites, later dumps append
        // (the previous files.isEmpty() check was redundant: an empty set cannot contain the name)
        boolean newFile = !files.contains(name);
        File target = new File(output, name);
        try {
            if (newFile) {
                // write as new file (override old file if exists)
                IOHelper.writeText(sbLocal.toString(), target);
            } else {
                // append to existing file
                IOHelper.appendText(sbLocal.toString(), target);
            }
            files.add(name);
            LOG.info("Dumped {} to file: {}", kind, target);
        } catch (IOException e) {
            throw new RuntimeException("Error dumping " + kind + " to file: " + target, e);
        }
    }
}
/**
 * Post-processes the dumped XML files so each one is wrapped in a single {@code <camel>}
 * root tag, which is required for the files to be valid camel XML documents.
 * Failures are logged and ignored so one bad file does not abort the others.
 *
 * @param files the names of the dumped files; non-XML entries are skipped
 */
protected void doAdjustXmlFiles(Set<String> files) {
    for (String name : files) {
        if (!name.endsWith(".xml")) {
            continue;
        }
        try {
            File file = new File(output, name);
            // wrap xml files with <camel> root tag
            String xml = IOHelper.loadText(new FileInputStream(file));
            String wrapped = "<camel>\n\n" + xml + "\n</camel>\n";
            IOHelper.writeText(wrapped, file);
        } catch (Exception e) {
            LOG.warn("Error adjusting dumped XML file: {} due to {}. This exception is ignored.", name, e.getMessage(),
                    e);
        }
    }
}
/**
 * Appends the dump to the log buffer, prefixed with a "Source:" header and divider
 * when the originating resource location can be determined.
 *
 * @param resource the source resource of the dump, or null if unknown
 * @param dump     the dumped content
 * @param sbLog    the log buffer to append to
 */
protected void appendLogDump(Resource resource, String dump, StringBuilder sbLog) {
    final String loc = resource != null ? extractLocationName(resource.getLocation()) : null;
    if (loc == null) {
        sbLog.append(String.format("%n%n%s%n", dump));
    } else {
        sbLog.append(String.format("%nSource: %s%n%s%n%s%n", loc, DIVIDER, dump));
    }
}
private static final | DefaultDumpRoutesStrategy |
java | junit-team__junit5 | documentation/src/test/java/example/UsingTheLauncherDemo.java | {
"start": 5624,
"end": 5704
} | class ____ implements LauncherDiscoveryListener {
}
| CustomLauncherDiscoveryListener |
java | quarkusio__quarkus | extensions/redis-cache/deployment/src/test/java/io/quarkus/cache/redis/deployment/ChainedRedisCacheTest.java | {
"start": 1225,
"end": 1520
} | class ____ {
@CacheResult(cacheName = "cache1")
public String cache1(String key) {
return key + ":" + cache2(42);
}
@CacheResult(cacheName = "cache2")
public int cache2(int value) {
return value;
}
}
}
| ChainedCachedService |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_yunban.java | {
"start": 1356,
"end": 1811
} | class ____ {
private String sourceId;
private Map<String, String> ext;
public String getSourceId() {
return sourceId;
}
public void setSourceId(String sourceId) {
this.sourceId = sourceId;
}
public Map<String, String> getExt() {
return ext;
}
public void setExt(Map<String, String> ext) {
this.ext = ext;
}
}
}
| RelationItem |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/extraction/BaseMappingExtractor.java | {
"start": 36755,
"end": 37012
} | interface ____ {
void verify(
Method method,
@Nullable FunctionStateTemplate state,
FunctionSignatureTemplate arguments,
@Nullable FunctionOutputTemplate result);
}
}
| MethodVerification |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/aggregate/HANAAggregateSupport.java | {
"start": 19455,
"end": 20921
} | class ____ implements JsonWriteExpression {
private final SelectableMapping selectableMapping;
private final String customWriteExpressionStart;
private final String customWriteExpressionEnd;
BasicJsonWriteExpression(SelectableMapping selectableMapping, String customWriteExpression) {
this.selectableMapping = selectableMapping;
if ( customWriteExpression.equals( "?" ) ) {
this.customWriteExpressionStart = "";
this.customWriteExpressionEnd = "";
}
else {
final String[] parts = StringHelper.split( "?", customWriteExpression );
assert parts.length == 2;
this.customWriteExpressionStart = parts[0];
this.customWriteExpressionEnd = parts[1];
}
}
@Override
public boolean isAggregate() {
return selectableMapping.getJdbcMapping().getJdbcType().isJson();
}
@Override
public void append(
SqlAppender sb,
String path,
SqlAstTranslator<?> translator,
AggregateColumnWriteExpression expression) {
sb.append( customWriteExpressionStart );
// We use NO_UNTYPED here so that expressions which require type inference are casted explicitly,
// since we don't know how the custom write expression looks like where this is embedded,
// so we have to be pessimistic and avoid ambiguities
translator.render( expression.getValueExpression( selectableMapping ), SqlAstNodeRenderingMode.NO_UNTYPED );
sb.append( customWriteExpressionEnd );
}
}
private static | BasicJsonWriteExpression |
java | netty__netty | transport-sctp/src/main/java/io/netty/channel/sctp/nio/NioSctpServerChannel.java | {
"start": 7035,
"end": 7378
} | class ____ extends DefaultSctpServerChannelConfig {
private NioSctpServerChannelConfig(NioSctpServerChannel channel, SctpServerChannel javaChannel) {
super(channel, javaChannel);
}
@Override
protected void autoReadCleared() {
clearReadPending();
}
}
}
| NioSctpServerChannelConfig |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/embeddable/EmbeddableWithToOneAssociation4Test.java | {
"start": 3637,
"end": 4054
} | class ____ {
@Id
//int id;
Integer id;
@Embedded
ParkingSpotDetails details;
public ParkingSpot() {
}
public ParkingSpot(int id, String garage, Employee assignedTo) {
this.id = id;
this.details = new ParkingSpotDetails(garage, assignedTo);
}
public int getId() {
return id;
}
public ParkingSpotDetails getDetails() {
return details;
}
}
@Embeddable
public static | ParkingSpot |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.