language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/api/BooleanArrayAssert.java | {
"start": 965,
"end": 1148
} | class ____ extends AbstractBooleanArrayAssert<BooleanArrayAssert> {
public BooleanArrayAssert(boolean[] actual) {
super(actual, BooleanArrayAssert.class);
}
}
| BooleanArrayAssert |
java | apache__camel | dsl/camel-kamelet-main/src/main/java/org/apache/camel/main/download/ExportPropertiesParser.java | {
"start": 1259,
"end": 2054
} | class ____ extends DefaultPropertiesParser {
private final CamelContext camelContext;
public ExportPropertiesParser(CamelContext camelContext) {
this.camelContext = camelContext;
}
@Override
public String parseProperty(String key, String value, PropertiesLookup properties) {
if (value == null) {
// the key may refer to a properties function so make sure we include this during export
if (key != null) {
try {
camelContext.getPropertiesComponent().getPropertiesFunction(key);
} catch (Exception e) {
// ignore
}
}
return PropertyConfigurerSupport.MAGIC_VALUE;
}
return value;
}
}
| ExportPropertiesParser |
java | elastic__elasticsearch | x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringPipeTests.java | {
"start": 997,
"end": 5670
} | class ____ extends AbstractNodeTestCase<BinaryStringStringPipe, Pipe> {
@Override
protected BinaryStringStringPipe randomInstance() {
return randomBinaryStringStringPipe();
}
private Expression randomBinaryStringStringExpression() {
return randomBinaryStringStringPipe().expression();
}
public static BinaryStringStringPipe randomBinaryStringStringPipe() {
List<Pipe> functions = new ArrayList<>();
functions.add(new Position(randomSource(), randomStringLiteral(), randomStringLiteral()).makePipe());
// if we decide to add DIFFERENCE(string,string) in the future, here we'd add it as well
return (BinaryStringStringPipe) randomFrom(functions);
}
@Override
public void testTransform() {
// test transforming only the properties (source, expression),
// skipping the children (the two parameters of the binary function) which are tested separately
BinaryStringStringPipe b1 = randomInstance();
Expression newExpression = randomValueOtherThan(b1.expression(), () -> randomBinaryStringStringExpression());
BinaryStringStringPipe newB = new BinaryStringStringPipe(b1.source(), newExpression, b1.left(), b1.right(), b1.operation());
assertEquals(newB, b1.transformPropertiesOnly(Expression.class, v -> Objects.equals(v, b1.expression()) ? newExpression : v));
BinaryStringStringPipe b2 = randomInstance();
Source newLoc = randomValueOtherThan(b2.source(), () -> randomSource());
newB = new BinaryStringStringPipe(newLoc, b2.expression(), b2.left(), b2.right(), b2.operation());
assertEquals(newB, b2.transformPropertiesOnly(Source.class, v -> Objects.equals(v, b2.source()) ? newLoc : v));
}
@Override
public void testReplaceChildren() {
BinaryStringStringPipe b = randomInstance();
Pipe newLeft = pipe(((Expression) randomValueOtherThan(b.left(), () -> randomStringLiteral())));
Pipe newRight = pipe(((Expression) randomValueOtherThan(b.right(), () -> randomStringLiteral())));
BinaryStringStringPipe newB = new BinaryStringStringPipe(b.source(), b.expression(), b.left(), b.right(), b.operation());
BinaryPipe transformed = newB.replaceChildren(newLeft, b.right());
assertEquals(transformed.left(), newLeft);
assertEquals(transformed.source(), b.source());
assertEquals(transformed.expression(), b.expression());
assertEquals(transformed.right(), b.right());
transformed = newB.replaceChildren(b.left(), newRight);
assertEquals(transformed.left(), b.left());
assertEquals(transformed.source(), b.source());
assertEquals(transformed.expression(), b.expression());
assertEquals(transformed.right(), newRight);
transformed = newB.replaceChildren(newLeft, newRight);
assertEquals(transformed.left(), newLeft);
assertEquals(transformed.source(), b.source());
assertEquals(transformed.expression(), b.expression());
assertEquals(transformed.right(), newRight);
}
@Override
protected BinaryStringStringPipe mutate(BinaryStringStringPipe instance) {
List<Function<BinaryStringStringPipe, BinaryStringStringPipe>> randoms = new ArrayList<>();
randoms.add(
f -> new BinaryStringStringPipe(
f.source(),
f.expression(),
pipe(((Expression) randomValueOtherThan(f.left(), () -> randomStringLiteral()))),
f.right(),
f.operation()
)
);
randoms.add(
f -> new BinaryStringStringPipe(
f.source(),
f.expression(),
f.left(),
pipe(((Expression) randomValueOtherThan(f.right(), () -> randomStringLiteral()))),
f.operation()
)
);
randoms.add(
f -> new BinaryStringStringPipe(
f.source(),
f.expression(),
pipe(((Expression) randomValueOtherThan(f.left(), () -> randomStringLiteral()))),
pipe(((Expression) randomValueOtherThan(f.right(), () -> randomStringLiteral()))),
f.operation()
)
);
return randomFrom(randoms).apply(instance);
}
@Override
protected BinaryStringStringPipe copy(BinaryStringStringPipe instance) {
return new BinaryStringStringPipe(
instance.source(),
instance.expression(),
instance.left(),
instance.right(),
instance.operation()
);
}
}
| BinaryStringStringPipeTests |
java | google__dagger | dagger-android-proguard-processor/main/java/dagger/android/internal/proguard/KspProguardProcessor.java | {
"start": 1743,
"end": 2450
} | class ____ extends KspBasicAnnotationProcessor {
private static final XProcessingEnvConfig PROCESSING_ENV_CONFIG =
new XProcessingEnvConfig.Builder().build();
private XProcessingEnv env;
private KspProguardProcessor(SymbolProcessorEnvironment symbolProcessorEnvironment) {
super(symbolProcessorEnvironment, PROCESSING_ENV_CONFIG);
}
@Override
public void initialize(XProcessingEnv env) {
this.env = env;
}
@Override
public Iterable<XProcessingStep> processingSteps() {
return ImmutableList.of(new ProguardProcessingStep(env));
}
/** Provides the {@link KspProguardProcessor}. */
@AutoService(SymbolProcessorProvider.class)
public static final | KspProguardProcessor |
java | spring-projects__spring-framework | spring-websocket/src/test/java/org/springframework/web/socket/sockjs/client/RestTemplateXhrTransportTests.java | {
"start": 2859,
"end": 7717
} | class ____ {
private static final JacksonJsonSockJsMessageCodec CODEC = new JacksonJsonSockJsMessageCodec();
private final WebSocketHandler webSocketHandler = mock();
@Test
void connectReceiveAndClose() throws Exception {
String body = """
o
a["foo"]
c[3000,"Go away!"]""";
ClientHttpResponse response = response(HttpStatus.OK, body);
connect(response);
verify(this.webSocketHandler).afterConnectionEstablished(any());
verify(this.webSocketHandler).handleMessage(any(), eq(new TextMessage("foo")));
verify(this.webSocketHandler).afterConnectionClosed(any(), eq(new CloseStatus(3000, "Go away!")));
verifyNoMoreInteractions(this.webSocketHandler);
}
@Test
void connectReceiveAndCloseWithPrelude() throws Exception {
String prelude = "h".repeat(2048);
String body = """
%s
o
a["foo"]
c[3000,"Go away!"]""".formatted(prelude);
ClientHttpResponse response = response(HttpStatus.OK, body);
connect(response);
verify(this.webSocketHandler).afterConnectionEstablished(any());
verify(this.webSocketHandler).handleMessage(any(), eq(new TextMessage("foo")));
verify(this.webSocketHandler).afterConnectionClosed(any(), eq(new CloseStatus(3000, "Go away!")));
verifyNoMoreInteractions(this.webSocketHandler);
}
@Test
void connectReceiveAndCloseWithStompFrame() throws Exception {
StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.SEND);
accessor.setDestination("/destination");
MessageHeaders headers = accessor.getMessageHeaders();
Message<byte[]> message = MessageBuilder.createMessage("body".getBytes(UTF_8), headers);
byte[] bytes = new StompEncoder().encode(message);
TextMessage textMessage = new TextMessage(bytes);
SockJsFrame frame = SockJsFrame.messageFrame(new JacksonJsonSockJsMessageCodec(), textMessage.getPayload());
String body = """
o
%s
c[3000,"Go away!"]""".formatted(frame.getContent());
ClientHttpResponse response = response(HttpStatus.OK, body);
connect(response);
verify(this.webSocketHandler).afterConnectionEstablished(any());
verify(this.webSocketHandler).handleMessage(any(), eq(textMessage));
verify(this.webSocketHandler).afterConnectionClosed(any(), eq(new CloseStatus(3000, "Go away!")));
verifyNoMoreInteractions(this.webSocketHandler);
}
@Test
void connectFailure() {
final HttpServerErrorException expected = new HttpServerErrorException(HttpStatus.INTERNAL_SERVER_ERROR);
RestOperations restTemplate = mock();
given(restTemplate.execute(any(), eq(HttpMethod.POST), any(), any())).willThrow(expected);
final CountDownLatch latch = new CountDownLatch(1);
connect(restTemplate).whenComplete((result, ex) -> {
if (ex == expected) {
latch.countDown();
}
});
verifyNoMoreInteractions(this.webSocketHandler);
}
@Test
void errorResponseStatus() throws Exception {
connect(response(HttpStatus.OK, "o\n"), response(HttpStatus.INTERNAL_SERVER_ERROR, "Oops"));
verify(this.webSocketHandler).afterConnectionEstablished(any());
verify(this.webSocketHandler).handleTransportError(any(), any());
verify(this.webSocketHandler).afterConnectionClosed(any(), any());
verifyNoMoreInteractions(this.webSocketHandler);
}
@Test
void responseClosedAfterDisconnected() throws Exception {
String body = """
o
c[3000,"Go away!"]
a["foo"]
""";
ClientHttpResponse response = response(HttpStatus.OK, body);
connect(response);
verify(this.webSocketHandler).afterConnectionEstablished(any());
verify(this.webSocketHandler).afterConnectionClosed(any(), any());
verifyNoMoreInteractions(this.webSocketHandler);
verify(response).close();
}
private CompletableFuture<WebSocketSession> connect(ClientHttpResponse... responses) {
return connect(new TestRestTemplate(responses));
}
private CompletableFuture<WebSocketSession> connect(RestOperations restTemplate) {
RestTemplateXhrTransport transport = new RestTemplateXhrTransport(restTemplate);
transport.setTaskExecutor(new SyncTaskExecutor());
SockJsUrlInfo urlInfo = new SockJsUrlInfo(URI.create("https://example.com"));
HttpHeaders headers = new HttpHeaders();
headers.add("h-foo", "h-bar");
TransportRequest request = new DefaultTransportRequest(urlInfo, headers, headers,
transport, TransportType.XHR, CODEC);
return transport.connectAsync(request, this.webSocketHandler);
}
private ClientHttpResponse response(HttpStatus status, String body) throws IOException {
ClientHttpResponse response = mock();
InputStream inputStream = getInputStream(body);
given(response.getStatusCode()).willReturn(status);
given(response.getBody()).willReturn(inputStream);
return response;
}
private InputStream getInputStream(String content) {
byte[] bytes = content.getBytes(UTF_8);
return new ByteArrayInputStream(bytes);
}
private static | RestTemplateXhrTransportTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/model/source/internal/hbm/IdentifierSourceNonAggregatedCompositeImpl.java | {
"start": 1196,
"end": 5587
} | class ____ implements IdentifierSourceNonAggregatedComposite, EmbeddableSource {
private final RootEntitySourceImpl rootEntitySource;
private final AttributePath attributePathBase;
private final AttributeRole attributeRoleBase;
private final IdentifierGeneratorDefinition generatorDefinition;
// NOTE: not typed because we need to expose as both:
// List<AttributeSource>
// List<SingularAttributeSource>
// :(
private final List attributeSources;
private final EmbeddableSource idClassSource;
private final ToolingHintContext toolingHintContext;
IdentifierSourceNonAggregatedCompositeImpl(RootEntitySourceImpl rootEntitySource) {
this.rootEntitySource = rootEntitySource;
this.attributePathBase = rootEntitySource.getAttributePathBase().append( "<id>" );
this.attributeRoleBase = rootEntitySource.getAttributeRoleBase().append( "<id>" );
this.generatorDefinition = EntityHierarchySourceImpl.interpretGeneratorDefinition(
rootEntitySource.sourceMappingDocument(),
rootEntitySource.getEntityNamingSource(),
rootEntitySource.jaxbEntityMapping().getCompositeId().getGenerator()
);
this.attributeSources = new ArrayList();
AttributesHelper.processCompositeKeySubAttributes(
rootEntitySource.sourceMappingDocument(),
new AttributesHelper.Callback() {
@Override
public AttributeSourceContainer getAttributeSourceContainer() {
return IdentifierSourceNonAggregatedCompositeImpl.this;
}
@Override
@SuppressWarnings("unchecked")
public void addAttributeSource(AttributeSource attributeSource) {
attributeSources.add( attributeSource );
}
},
rootEntitySource.jaxbEntityMapping().getCompositeId().getKeyPropertyOrKeyManyToOne()
);
// NOTE : the HBM support for IdClass is very limited. Essentially
// we assume that all identifier attributes occur in the IdClass
// using the same name and type.
this.idClassSource = interpretIdClass(
rootEntitySource.sourceMappingDocument(),
rootEntitySource.jaxbEntityMapping().getCompositeId()
);
this.toolingHintContext = Helper.collectToolingHints(
rootEntitySource.getToolingHintContext(),
rootEntitySource.jaxbEntityMapping().getCompositeId()
);
}
private EmbeddableSource interpretIdClass(
MappingDocument mappingDocument,
JaxbHbmCompositeIdType jaxbHbmCompositeIdMapping) {
// if <composite-id/> is null here we have much bigger problems :)
if ( !jaxbHbmCompositeIdMapping.isMapped() ) {
return null;
}
final String className = jaxbHbmCompositeIdMapping.getClazz();
if ( StringHelper.isEmpty( className ) ) {
return null;
}
final String idClassQualifiedName = mappingDocument.qualifyClassName( className );
final JavaTypeDescriptor idClassTypeDescriptor = new JavaTypeDescriptor() {
@Override
public String getName() {
return idClassQualifiedName;
}
};
return new IdClassSource( idClassTypeDescriptor, rootEntitySource, mappingDocument );
}
@Override
@SuppressWarnings("unchecked")
public List<SingularAttributeSource> getAttributeSourcesMakingUpIdentifier() {
return attributeSources;
}
@Override
public EmbeddableSource getIdClassSource() {
return idClassSource;
}
@Override
public IdentifierGeneratorDefinition getIdentifierGeneratorDescriptor() {
return generatorDefinition;
}
@Override
public EntityIdentifierNature getNature() {
return EntityIdentifierNature.NON_AGGREGATED_COMPOSITE;
}
@Override
public JavaTypeDescriptor getTypeDescriptor() {
return null;
}
@Override
public String getParentReferenceAttributeName() {
return null;
}
@Override
public boolean isDynamic() {
return false;
}
@Override
public boolean isUnique() {
return false;
}
@Override
public AttributePath getAttributePathBase() {
return attributePathBase;
}
@Override
public AttributeRole getAttributeRoleBase() {
return attributeRoleBase;
}
@Override
@SuppressWarnings("unchecked")
public List<AttributeSource> attributeSources() {
return attributeSources;
}
@Override
public LocalMetadataBuildingContext getLocalMetadataBuildingContext() {
return rootEntitySource.metadataBuildingContext();
}
@Override
public EmbeddableSource getEmbeddableSource() {
return this;
}
@Override
public ToolingHintContext getToolingHintContext() {
return toolingHintContext;
}
}
| IdentifierSourceNonAggregatedCompositeImpl |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/config/annotation/AsyncSupportConfigurer.java | {
"start": 1276,
"end": 4140
} | class ____ {
private @Nullable AsyncTaskExecutor taskExecutor;
private @Nullable Long timeout;
private final List<CallableProcessingInterceptor> callableInterceptors = new ArrayList<>();
private final List<DeferredResultProcessingInterceptor> deferredResultInterceptors = new ArrayList<>();
/**
* The provided task executor is used for the following:
* <ol>
* <li>Handle {@link Callable} controller method return values.
* <li>Perform blocking writes when streaming to the response
* through a reactive (for example, Reactor, RxJava) controller method return value.
* </ol>
* <p>If your application has controllers with such return types, please
* configure an {@link AsyncTaskExecutor} as the one used by default is not
* suitable for production under load.
* @param taskExecutor the task executor instance to use by default
*/
public AsyncSupportConfigurer setTaskExecutor(AsyncTaskExecutor taskExecutor) {
this.taskExecutor = taskExecutor;
return this;
}
/**
* Specify the amount of time, in milliseconds, before asynchronous request
* handling times out. In Servlet 3, the timeout begins after the main request
* processing thread has exited and ends when the request is dispatched again
* for further processing of the concurrently produced result.
* <p>If this value is not set, the default timeout of the underlying
* implementation is used.
* @param timeout the timeout value in milliseconds
*/
public AsyncSupportConfigurer setDefaultTimeout(long timeout) {
this.timeout = timeout;
return this;
}
/**
* Configure lifecycle interceptors with callbacks around concurrent request
* execution that starts when a controller returns a
* {@link java.util.concurrent.Callable}.
* @param interceptors the interceptors to register
*/
public AsyncSupportConfigurer registerCallableInterceptors(CallableProcessingInterceptor... interceptors) {
this.callableInterceptors.addAll(Arrays.asList(interceptors));
return this;
}
/**
* Configure lifecycle interceptors with callbacks around concurrent request
* execution that starts when a controller returns a {@link DeferredResult}.
* @param interceptors the interceptors to register
*/
public AsyncSupportConfigurer registerDeferredResultInterceptors(
DeferredResultProcessingInterceptor... interceptors) {
this.deferredResultInterceptors.addAll(Arrays.asList(interceptors));
return this;
}
protected @Nullable AsyncTaskExecutor getTaskExecutor() {
return this.taskExecutor;
}
protected @Nullable Long getTimeout() {
return this.timeout;
}
protected List<CallableProcessingInterceptor> getCallableInterceptors() {
return this.callableInterceptors;
}
protected List<DeferredResultProcessingInterceptor> getDeferredResultInterceptors() {
return this.deferredResultInterceptors;
}
}
| AsyncSupportConfigurer |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/Parser.java | {
"start": 8720,
"end": 11612
} | class ____ extends Node {
private static final Class<?>[] cstrSig =
{ Integer.TYPE, RecordReader.class, Class.class };
static void addIdentifier(String ident,
Class<? extends ComposableRecordReader> cl)
throws NoSuchMethodException {
Node.addIdentifier(ident, cstrSig, WNode.class, cl);
}
private String indir;
private InputFormat inf;
public WNode(String ident) {
super(ident);
}
/**
* Let the first actual define the InputFormat and the second define
* the <code>mapred.input.dir</code> property.
*/
public void parse(List<Token> ll, JobConf job) throws IOException {
StringBuilder sb = new StringBuilder();
Iterator<Token> i = ll.iterator();
while (i.hasNext()) {
Token t = i.next();
if (TType.COMMA.equals(t.getType())) {
try {
inf = (InputFormat)ReflectionUtils.newInstance(
job.getClassByName(sb.toString()),
job);
} catch (ClassNotFoundException e) {
throw (IOException)new IOException().initCause(e);
} catch (IllegalArgumentException e) {
throw (IOException)new IOException().initCause(e);
}
break;
}
sb.append(t.getStr());
}
if (!i.hasNext()) {
throw new IOException("Parse error");
}
Token t = i.next();
if (!TType.QUOT.equals(t.getType())) {
throw new IOException("Expected quoted string");
}
indir = t.getStr();
// no check for ll.isEmpty() to permit extension
}
private JobConf getConf(JobConf job) {
JobConf conf = new JobConf(job);
FileInputFormat.setInputPaths(conf, indir);
conf.setClassLoader(job.getClassLoader());
return conf;
}
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
return inf.getSplits(getConf(job), numSplits);
}
public ComposableRecordReader getRecordReader(
InputSplit split, JobConf job, Reporter reporter) throws IOException {
try {
if (!rrCstrMap.containsKey(ident)) {
throw new IOException("No RecordReader for " + ident);
}
return rrCstrMap.get(ident).newInstance(id,
inf.getRecordReader(split, getConf(job), reporter), cmpcl);
} catch (IllegalAccessException e) {
throw (IOException)new IOException().initCause(e);
} catch (InstantiationException e) {
throw (IOException)new IOException().initCause(e);
} catch (InvocationTargetException e) {
throw (IOException)new IOException().initCause(e);
}
}
public String toString() {
return ident + "(" + inf.getClass().getName() + ",\"" + indir + "\")";
}
}
/**
* Internal nodetype for "composite" InputFormats.
*/
static | WNode |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/DiagnosticsCollector.java | {
"start": 1081,
"end": 1147
} | interface ____ can be used for collecting diagnostics.
*/
public | that |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/OptionalOfRedundantMethodTest.java | {
"start": 4840,
"end": 5114
} | class ____ {
String f() {
return Optional.of("test").orElseGet(() -> "test2");
}
}
""")
.addOutputLines(
"Test.java",
"""
import java.util.Optional;
| Test |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/api/AssertTimeoutAssertionsTests.java | {
"start": 1110,
"end": 5788
} | class ____ {
private static final ThreadLocal<AtomicBoolean> changed = ThreadLocal.withInitial(() -> new AtomicBoolean(false));
private final Executable nix = () -> {
};
// --- executable ----------------------------------------------------------
@Test
void assertTimeoutForExecutableThatCompletesBeforeTheTimeout() {
changed.get().set(false);
assertTimeout(ofMillis(500), () -> changed.get().set(true));
assertTrue(changed.get().get(), "should have executed in the same thread");
assertTimeout(ofMillis(500), nix, "message");
assertTimeout(ofMillis(500), nix, () -> "message");
}
@Test
void assertTimeoutForExecutableThatThrowsAnException() {
RuntimeException exception = assertThrows(RuntimeException.class, () -> assertTimeout(ofMillis(500), () -> {
throw new RuntimeException("not this time");
}));
assertMessageEquals(exception, "not this time");
}
@Test
void assertTimeoutForExecutableThatThrowsAnAssertionFailedError() {
AssertionFailedError exception = assertThrows(AssertionFailedError.class,
() -> assertTimeout(ofMillis(500), () -> fail("enigma")));
assertMessageEquals(exception, "enigma");
}
@Test
void assertTimeoutForExecutableThatCompletesAfterTheTimeout() {
AssertionFailedError error = assertThrows(AssertionFailedError.class,
() -> assertTimeout(ofMillis(10), this::nap));
assertMessageStartsWith(error, "execution exceeded timeout of 10 ms by");
}
@Test
void assertTimeoutWithMessageForExecutableThatCompletesAfterTheTimeout() {
AssertionFailedError error = assertThrows(AssertionFailedError.class,
() -> assertTimeout(ofMillis(10), this::nap, "Tempus Fugit"));
assertMessageStartsWith(error, "Tempus Fugit ==> execution exceeded timeout of 10 ms by");
}
@Test
void assertTimeoutWithMessageSupplierForExecutableThatCompletesAfterTheTimeout() {
AssertionFailedError error = assertThrows(AssertionFailedError.class,
() -> assertTimeout(ofMillis(10), this::nap, () -> "Tempus" + " " + "Fugit"));
assertMessageStartsWith(error, "Tempus Fugit ==> execution exceeded timeout of 10 ms by");
}
// --- supplier ------------------------------------------------------------
@Test
void assertTimeoutForSupplierThatCompletesBeforeTheTimeout() {
changed.get().set(false);
String result = assertTimeout(ofMillis(500), () -> {
changed.get().set(true);
return "Tempus Fugit";
});
assertTrue(changed.get().get(), "should have executed in the same thread");
assertEquals("Tempus Fugit", result);
assertEquals("Tempus Fugit", assertTimeout(ofMillis(500), () -> "Tempus Fugit", "message"));
assertEquals("Tempus Fugit", assertTimeout(ofMillis(500), () -> "Tempus Fugit", () -> "message"));
}
@Test
void assertTimeoutForSupplierThatThrowsAnException() {
RuntimeException exception = assertThrows(RuntimeException.class, () -> {
assertTimeout(ofMillis(500),
() -> ExceptionUtils.throwAsUncheckedException(new RuntimeException("not this time")));
});
assertMessageEquals(exception, "not this time");
}
@Test
void assertTimeoutForSupplierThatThrowsAnAssertionFailedError() {
AssertionFailedError exception = assertThrows(AssertionFailedError.class, () -> {
assertTimeout(ofMillis(500), () -> fail("enigma"));
});
assertMessageEquals(exception, "enigma");
}
@Test
void assertTimeoutForSupplierThatCompletesAfterTheTimeout() {
AssertionFailedError error = assertThrows(AssertionFailedError.class, () -> {
assertTimeout(ofMillis(10), () -> {
nap();
return "Tempus Fugit";
});
});
assertMessageStartsWith(error, "execution exceeded timeout of 10 ms by");
}
@Test
void assertTimeoutWithMessageForSupplierThatCompletesAfterTheTimeout() {
AssertionFailedError error = assertThrows(AssertionFailedError.class, () -> {
assertTimeout(ofMillis(10), () -> {
nap();
return "Tempus Fugit";
}, "Tempus Fugit");
});
assertMessageStartsWith(error, "Tempus Fugit ==> execution exceeded timeout of 10 ms by");
}
@Test
void assertTimeoutWithMessageSupplierForSupplierThatCompletesAfterTheTimeout() {
AssertionFailedError error = assertThrows(AssertionFailedError.class, () -> {
assertTimeout(ofMillis(10), () -> {
nap();
return "Tempus Fugit";
}, () -> "Tempus" + " " + "Fugit");
});
assertMessageStartsWith(error, "Tempus Fugit ==> execution exceeded timeout of 10 ms by");
}
/**
* Take a nap for 100 milliseconds.
*/
private void nap() throws InterruptedException {
long start = System.nanoTime();
// workaround for imprecise clocks (yes, Windows, I'm talking about you)
do {
Thread.sleep(100);
} while (System.nanoTime() - start < 100_000_000L);
}
}
| AssertTimeoutAssertionsTests |
java | playframework__playframework | web/play-java-forms/src/main/java/play/data/validation/Constraints.java | {
"start": 25203,
"end": 25630
} | class ____
implements PlayConstraintValidator<Validate, Validatable<?>> {
@Override
public void initialize(final Validate constraintAnnotation) {}
@Override
public boolean isValid(
final Validatable<?> value, final ConstraintValidatorContext constraintValidatorContext) {
return reportValidationStatus(value.validate(), constraintValidatorContext);
}
}
public static | ValidateValidator |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ReturnValueIgnoredTest.java | {
"start": 31778,
"end": 32090
} | class ____ {
void test(VarHandle.AccessMode accessMode) {
accessMode.methodName();
}
}
""")
.doTest();
}
@Test
public void throwableMethods() {
compilationHelper
.addSourceLines(
"Test.java",
" | Test |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/seda/SedaDefaultBlockWhenFullTest.java | {
"start": 3410,
"end": 4354
} | class ____'t working as intended");
assertIsInstanceOf(IllegalStateException.class, e.getCause());
}
@Test
public void testSedaBlockingWhenFull() throws Exception {
getMockEndpoint(MOCK_URI).setExpectedMessageCount(QUEUE_SIZE + 2);
SedaEndpoint seda = context.getEndpoint(BLOCK_WHEN_FULL_URI, SedaEndpoint.class);
assertEquals(QUEUE_SIZE, seda.getQueue().remainingCapacity());
sendTwoOverCapacity(BLOCK_WHEN_FULL_URI, QUEUE_SIZE);
assertMockEndpointsSatisfied();
}
/**
* This method make sure that we hit the limit by sending two msg over the given capacity which allows the delayer
* to kick in, leaving the 2nd msg in the queue, blocking/throwing on the third one.
*/
private void sendTwoOverCapacity(String uri, int capacity) {
for (int i = 0; i < (capacity + 2); i++) {
template.sendBody(uri, "Message " + i);
}
}
}
| isn |
java | spring-projects__spring-boot | build-plugin/spring-boot-maven-plugin/src/main/java/org/springframework/boot/maven/BuildImageMojo.java | {
"start": 11898,
"end": 12347
} | class ____ extends AbstractBuildLog {
private static final long THRESHOLD = Duration.ofSeconds(2).toMillis();
private final Supplier<Log> log;
MojoBuildLog(Supplier<Log> log) {
this.log = log;
}
@Override
protected void log(String message) {
this.log.get().info(message);
}
@Override
protected Consumer<TotalProgressEvent> getProgressConsumer(String message) {
return new ProgressLog(message);
}
private | MojoBuildLog |
java | apache__camel | components/camel-telegram/src/main/java/org/apache/camel/component/telegram/model/InlineQueryResultGif.java | {
"start": 1213,
"end": 2003
} | class ____ extends InlineQueryResult {
private static final String TYPE = "gif";
@JsonProperty("gif_url")
private String gifUrl;
@JsonProperty("gif_width")
private String gifWidth;
@JsonProperty("gif_height")
private Integer gifHeight;
@JsonProperty("gif_duration")
private Integer duration;
@JsonProperty("thumb_url")
private String thumbUrl;
private String title;
private String caption;
@JsonProperty("parse_mode")
private String parseMode;
@JsonProperty("input_message_content")
private InputMessageContent inputMessageContext;
public InlineQueryResultGif() {
super(TYPE);
}
public static Builder builder() {
return new Builder();
}
public static final | InlineQueryResultGif |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/scripting/xmltags/IfSqlNode.java | {
"start": 736,
"end": 1232
} | class ____ implements SqlNode {
private final ExpressionEvaluator evaluator = ExpressionEvaluator.INSTANCE;
private final String test;
private final SqlNode contents;
public IfSqlNode(SqlNode contents, String test) {
this.test = test;
this.contents = contents;
}
@Override
public boolean apply(DynamicContext context) {
if (evaluator.evaluateBoolean(test, context.getBindings())) {
contents.apply(context);
return true;
}
return false;
}
}
| IfSqlNode |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java | {
"start": 33723,
"end": 36630
} | class ____.e 'ipc.scheduler.impl'. If default config is also not
* present, default class {@link DefaultRpcScheduler} is returned.
*
* @param namespace Namespace "ipc".
* @param port Server's listener port.
* @param conf Configuration properties.
* @return Class returned based on configuration.
*/
static Class<? extends RpcScheduler> getSchedulerClass(
String namespace, int port, Configuration conf) {
String schedulerKeyNameWithPort = namespace + "." + port + "."
+ CommonConfigurationKeys.IPC_SCHEDULER_IMPL_KEY;
String schedulerKeyNameWithoutPort = namespace + "."
+ CommonConfigurationKeys.IPC_SCHEDULER_IMPL_KEY;
Class<?> schedulerClass = conf.getClass(schedulerKeyNameWithPort, null);
// Patch the configuration for legacy fcq configuration that does not have
// a separate scheduler setting
if (schedulerClass == null) {
String queueKeyNameWithPort = namespace + "." + port + "."
+ CommonConfigurationKeys.IPC_CALLQUEUE_IMPL_KEY;
Class<?> queueClass = conf.getClass(queueKeyNameWithPort, null);
if (queueClass != null) {
if (queueClass.getCanonicalName().equals(
FairCallQueue.class.getCanonicalName())) {
conf.setClass(schedulerKeyNameWithPort, DecayRpcScheduler.class,
RpcScheduler.class);
}
}
}
schedulerClass = conf.getClass(schedulerKeyNameWithPort, null);
if (schedulerClass == null) {
schedulerClass = conf.getClass(schedulerKeyNameWithoutPort,
DefaultRpcScheduler.class);
}
return CallQueueManager.convertSchedulerClass(schedulerClass);
}
/*
* Refresh the call queue
*/
public synchronized void refreshCallQueue(Configuration conf) {
// Create the next queue
String prefix = getQueueClassPrefix();
this.maxQueueSize = handlerCount * conf.getInt(
CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_KEY,
CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT);
callQueue.swapQueue(
getSchedulerClass(CommonConfigurationKeys.IPC_NAMESPACE, port, conf),
getQueueClass(CommonConfigurationKeys.IPC_NAMESPACE, port, conf),
maxQueueSize, prefix, conf);
callQueue.setClientBackoffEnabled(getClientBackoffEnable(
CommonConfigurationKeys.IPC_NAMESPACE, port, conf));
}
/**
 * Read from configuration whether client back-off is enabled.
 * Looks up {@code <prefix>.}{@link CommonConfigurationKeys#IPC_BACKOFF_ENABLE},
 * falling back to the default when unset. Note this legacy variant is not
 * port-specific — it reads only the prefix-wide key.
 *
 * @param prefix configuration key prefix (e.g. the IPC namespace)
 * @param conf   configuration to read from
 * @return the configured back-off setting, or the default when absent
 * @deprecated presumably superseded by a port-aware overload (its javadoc
 *             mentions {@code ipc.<port>.backoff.enable}) — confirm against
 *             the rest of this class
 */
@Deprecated
static boolean getClientBackoffEnable(
String prefix, Configuration conf) {
String name = prefix + "." +
CommonConfigurationKeys.IPC_BACKOFF_ENABLE;
return conf.getBoolean(name,
CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT);
}
/**
* Return boolean value configured by property 'ipc.<port>.backoff.enable'
* if it is present. If the config is not present, default config
* (without port) is used to derive | i |
java | redisson__redisson | redisson/src/test/java/org/redisson/SimpleDnsServer.java | {
"start": 1731,
"end": 2691
} | class ____ extends SimpleChannelInboundHandler<DatagramDnsQuery> {
@Override
protected void channelRead0(ChannelHandlerContext ctx, DatagramDnsQuery query) throws Exception {
DefaultDnsQuestion question = query.recordAt(DnsSection.QUESTION);
String requestedDomain = question.name();
DatagramDnsResponse response = new DatagramDnsResponse(query.recipient(), query.sender(), query.id());
response.addRecord(DnsSection.QUESTION, question);
response.addRecord(DnsSection.ANSWER, new DefaultDnsRawRecord(question.name(), DnsRecordType.A, 0,
Unpooled.wrappedBuffer(InetAddress.getByName(ip).getAddress()))); // Example IP
ctx.writeAndFlush(response);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
cause.printStackTrace();
ctx.close();
}
}
}
| DnsMessageHandler |
java | apache__rocketmq | tools/src/main/java/org/apache/rocketmq/tools/command/message/QueryMsgTraceByIdSubCommand.java | {
"start": 1702,
"end": 7757
} | class ____ implements SubCommand {
@Override
public Options buildCommandlineOptions(Options options) {
Option opt = new Option("i", "msgId", true, "Message Id");
opt.setRequired(true);
options.addOption(opt);
opt = new Option("t", "traceTopic", true, "The name value of message trace topic");
opt.setRequired(false);
options.addOption(opt);
opt = new Option("b", "beginTimestamp", true, "Begin timestamp(ms). default:0, eg:1676730526212");
opt.setRequired(false);
options.addOption(opt);
opt = new Option("e", "endTimestamp", true, "End timestamp(ms). default:Long.MAX_VALUE, eg:1676730526212");
opt.setRequired(false);
options.addOption(opt);
opt = new Option("c", "maxNum", true, "The maximum number of messages returned by the query, default:64");
opt.setRequired(false);
options.addOption(opt);
return options;
}
@Override
public String commandDesc() {
return "Query a message trace.";
}
@Override
public String commandName() {
return "queryMsgTraceById";
}
@Override
public String commandAlias() {
return "QueryMsgTraceById";
}
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException {
DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
try {
final String msgId = commandLine.getOptionValue('i').trim();
String traceTopic = TopicValidator.RMQ_SYS_TRACE_TOPIC;
if (commandLine.hasOption('t')) {
traceTopic = commandLine.getOptionValue('t').trim();
}
if (commandLine.hasOption('n')) {
defaultMQAdminExt.setNamesrvAddr(commandLine.getOptionValue('n').trim());
}
long beginTimestamp = 0;
long endTimestamp = Long.MAX_VALUE;
int maxNum = 64;
if (commandLine.hasOption("b")) {
beginTimestamp = Long.parseLong(commandLine.getOptionValue("b").trim());
}
if (commandLine.hasOption("e")) {
endTimestamp = Long.parseLong(commandLine.getOptionValue("e").trim());
}
if (commandLine.hasOption("c")) {
maxNum = Integer.parseInt(commandLine.getOptionValue("c").trim());
}
this.queryTraceByMsgId(defaultMQAdminExt, traceTopic, msgId, maxNum, beginTimestamp, endTimestamp);
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + "command failed", e);
} finally {
defaultMQAdminExt.shutdown();
}
}
private void queryTraceByMsgId(final DefaultMQAdminExt admin, String traceTopic, String msgId, int maxNum,
long begin, long end)
throws MQClientException, InterruptedException {
admin.start();
QueryResult queryResult = admin.queryMessage(traceTopic, msgId, maxNum, begin, end);
List<MessageExt> messageList = queryResult.getMessageList();
List<TraceView> traceViews = new ArrayList<>();
for (MessageExt message : messageList) {
List<TraceView> traceView = TraceView.decodeFromTraceTransData(msgId, message);
traceViews.addAll(traceView);
}
this.printMessageTrace(traceViews);
}
private void printMessageTrace(List<TraceView> traceViews) {
Map<String, List<TraceView>> consumerTraceMap = new HashMap<>(16);
for (TraceView traceView : traceViews) {
if (traceView.getMsgType().equals(TraceType.Pub.name())) {
System.out.printf("%-10s %-20s %-20s %-20s %-10s %-10s%n",
"#Type",
"#ProducerGroup",
"#ClientHost",
"#SendTime",
"#CostTimes",
"#Status"
);
System.out.printf("%-10s %-20s %-20s %-20s %-10s %-10s%n",
"Pub",
traceView.getGroupName(),
traceView.getClientHost(),
DateFormatUtils.format(traceView.getTimeStamp(), "yyyy-MM-dd HH:mm:ss"),
traceView.getCostTime() + "ms",
traceView.getStatus()
);
System.out.printf("\n");
}
if (traceView.getMsgType().equals(TraceType.SubAfter.name())) {
String groupName = traceView.getGroupName();
if (consumerTraceMap.containsKey(groupName)) {
consumerTraceMap.get(groupName).add(traceView);
} else {
ArrayList<TraceView> views = new ArrayList<>();
views.add(traceView);
consumerTraceMap.put(groupName, views);
}
}
}
Iterator<String> consumers = consumerTraceMap.keySet().iterator();
while (consumers.hasNext()) {
System.out.printf("%-10s %-20s %-20s %-20s %-10s %-10s%n",
"#Type",
"#ConsumerGroup",
"#ClientHost",
"#ConsumerTime",
"#CostTimes",
"#Status"
);
List<TraceView> consumerTraces = consumerTraceMap.get(consumers.next());
for (TraceView traceView : consumerTraces) {
System.out.printf("%-10s %-20s %-20s %-20s %-10s %-10s%n",
"Sub",
traceView.getGroupName(),
traceView.getClientHost(),
DateFormatUtils.format(traceView.getTimeStamp(), "yyyy-MM-dd HH:mm:ss"),
traceView.getCostTime() + "ms",
traceView.getStatus()
);
}
System.out.printf("\n");
}
}
}
| QueryMsgTraceByIdSubCommand |
java | square__moshi | moshi/src/test/java/com/squareup/moshi/AdapterMethodsTest.java | {
"start": 14949,
"end": 15384
} | interface ____ {}
@Test
public void toAndFromNullJsonWithWriterAndReader() throws Exception {
Moshi moshi = new Moshi.Builder().add(new NullableIntToJsonAdapter()).build();
JsonAdapter<Point> pointAdapter = moshi.adapter(Point.class);
assertThat(pointAdapter.fromJson("{\"x\":null,\"y\":3}")).isEqualTo(new Point(-1, 3));
assertThat(pointAdapter.toJson(new Point(-1, 3))).isEqualTo("{\"y\":3}");
}
static | Nullable |
java | quarkusio__quarkus | integration-tests/smallrye-context-propagation/src/test/java/io/quarkus/context/test/mutiny/MutinyContextEndpoint.java | {
"start": 1281,
"end": 18404
} | class ____ {
@Inject
RequestBean doNotRemoveMe;
@Inject
ManagedExecutor all;
@Inject
ThreadContext allTc;
ExecutorService executor = Executors.newSingleThreadExecutor();
@PreDestroy
public void shutdown() {
executor.shutdown();
}
@GET
@Path("/resteasy-uni-cs")
public Uni<String> resteasyContextPropagationWithUniCreatedFromCSWithManagedExecutor(@Context UriInfo uriInfo) {
CompletableFuture<String> ret = all.completedFuture("OK");
return Uni.createFrom().completionStage(ret)
.emitOn(Infrastructure.getDefaultExecutor())
.onItem().transform(s -> {
Assertions.assertNotNull(uriInfo.getAbsolutePath());
try {
Assertions.assertTrue(
uriInfo.getAbsolutePath().toURL().toExternalForm().contains("/resteasy-uni-cs"));
} catch (MalformedURLException e) {
throw new AssertionError(e);
}
return s;
});
}
@GET
@Path("/resteasy-uni")
public Uni<String> resteasyContextPropagation(@Context UriInfo uriInfo) {
return Uni.createFrom().item("OK")
.emitOn(Infrastructure.getDefaultExecutor())
.onItem().transform(s -> {
Assertions.assertNotNull(uriInfo.getAbsolutePath());
try {
Assertions.assertTrue(
uriInfo.getAbsolutePath().toURL().toExternalForm().contains("/resteasy-uni"));
} catch (MalformedURLException e) {
throw new AssertionError(e);
}
return s;
});
}
@GET
@Path("/resteasy-tc-uni-cs")
public Uni<String> resteasyThreadContextWithCS(@Context UriInfo uriInfo) {
CompletableFuture<String> ret = allTc.withContextCapture(CompletableFuture.completedFuture("OK"));
return Uni.createFrom().completionStage(ret)
.emitOn(executor)
.onItem().transform(s -> {
uriInfo.getAbsolutePath();
return s;
});
}
@GET
@Path("/servlet-uni")
public Uni<String> servletContextPropagation(@Context UriInfo uriInfo) {
RequestBean instance = Arc.container().instance(RequestBean.class).get();
String previousValue = instance.callMe();
return Uni.createFrom().item("OK")
.emitOn(Infrastructure.getDefaultExecutor())
.map(text -> {
RequestBean instance2 = Arc.container().instance(RequestBean.class).get();
Assertions.assertEquals(previousValue, instance2.callMe());
return text;
})
.onFailure().invoke(t -> System.out.println("Got failure " + t.getMessage()));
}
@GET
@Path("/servlet-uni-cs")
public Uni<String> servletContextPropagationWithUniCreatedFromCSWithManagedExecutor(@Context UriInfo uriInfo) {
CompletableFuture<String> ret = all.completedFuture("OK");
RequestBean instance = Arc.container().instance(RequestBean.class).get();
String previousValue = instance.callMe();
return Uni.createFrom().completionStage(() -> ret)
.emitOn(Infrastructure.getDefaultExecutor())
.map(text -> {
RequestBean instance2 = Arc.container().instance(RequestBean.class).get();
Assertions.assertEquals(previousValue, instance2.callMe());
return text;
});
}
@GET
@Path("/servlet-tc-uni-cs")
public Uni<String> servletThreadContext(@Context UriInfo uriInfo) {
CompletableFuture<String> ret = allTc.withContextCapture(CompletableFuture.completedFuture("OK"));
RequestBean instance = Arc.container().instance(RequestBean.class).get();
String previousValue = instance.callMe();
return Uni.createFrom().completionStage(() -> ret)
.emitOn(executor)
.map(text -> {
RequestBean instance2 = Arc.container().instance(RequestBean.class).get();
Assertions.assertEquals(previousValue, instance2.callMe());
return text;
});
}
@GET
@Path("/arc-uni")
public Uni<String> arcContextPropagation() {
Assert.assertTrue(Arc.container().instance(RequestBean.class).isAvailable());
RequestBean instance = Arc.container().instance(RequestBean.class).get();
String previousValue = instance.callMe();
return Uni.createFrom().item("OK")
.emitOn(Infrastructure.getDefaultExecutor())
.map(text -> {
RequestBean instance2 = Arc.container().instance(RequestBean.class).get();
Assertions.assertEquals(previousValue, instance2.callMe());
return text;
});
}
@GET
@Path("/arc-uni-cs")
public Uni<String> arcContextPropagationWithUniCreatedFromCSWithManagedExecutor() {
Assert.assertTrue(Arc.container().instance(RequestBean.class).isAvailable());
RequestBean instance = Arc.container().instance(RequestBean.class).get();
String previousValue = instance.callMe();
CompletableFuture<String> ret = all.completedFuture("OK");
return Uni.createFrom().completionStage(() -> ret)
.emitOn(Infrastructure.getDefaultExecutor())
.map(text -> {
RequestBean instance2 = Arc.container().instance(RequestBean.class).get();
Assertions.assertEquals(previousValue, instance2.callMe());
return text;
});
}
@GET
@Path("/arc-tc-uni")
public Uni<String> arcContextPropagationWithThreadContext() {
ExecutorService executor = Executors.newSingleThreadExecutor();
Assert.assertTrue(Arc.container().instance(RequestBean.class).isAvailable());
RequestBean instance = Arc.container().instance(RequestBean.class).get();
String previousValue = instance.callMe();
CompletableFuture<String> ret = allTc.withContextCapture(CompletableFuture.completedFuture("OK"));
return Uni.createFrom().completionStage(() -> ret)
.emitOn(executor)
.map(text -> {
RequestBean instance2 = Arc.container().instance(RequestBean.class).get();
Assertions.assertEquals(previousValue, instance2.callMe());
return text;
});
}
@Inject
MutinyTransactionalBean txBean;
@Transactional
@GET
@Path("/transaction-uni")
public Uni<String> contextPropagationWithTxAndUni() throws SystemException {
SomeEntity.deleteAll();
Uni<String> ret = Uni.createFrom().item("OK");
SomeEntity entity = new SomeEntity();
entity.name = "Stef";
entity.persist();
Transaction t1 = Panache.getTransactionManager().getTransaction();
Assertions.assertNotNull(t1);
return ret
.emitOn(executor)
.map(text -> {
Assertions.assertEquals(1, SomeEntity.count());
Transaction t2;
try {
t2 = Panache.getTransactionManager().getTransaction();
} catch (SystemException e) {
throw new RuntimeException(e);
}
Assertions.assertEquals(t1, t2);
return text;
});
}
@Transactional
@GET
@Path("/transaction-uni-cs")
public Uni<String> contextPropagationWithTxAndUniCreatedFromCS() throws SystemException {
Uni<String> ret = Uni.createFrom().completionStage(all.completedFuture("OK"));
SomeOtherEntity entity = new SomeOtherEntity();
entity.name = "Stef";
entity.persist();
Transaction t1 = Panache.getTransactionManager().getTransaction();
Assertions.assertNotNull(t1);
return ret
.emitOn(executor)
.map(text -> {
Assertions.assertEquals(1, SomeOtherEntity.count());
Transaction t2;
try {
t2 = Panache.getTransactionManager().getTransaction();
} catch (SystemException e) {
throw new RuntimeException(e);
}
Assertions.assertEquals(t1, t2);
return text;
});
}
@Transactional
@GET
@Path("/transaction-tc-uni")
public Uni<String> transactionPropagationWithThreadContextAndUniCreatedFromCS() throws SystemException {
CompletableFuture<String> ret = allTc.withContextCapture(CompletableFuture.completedFuture("OK"));
SomeEntity entity = new SomeEntity();
entity.name = "Stef";
entity.persist();
Transaction t1 = Panache.getTransactionManager().getTransaction();
Assertions.assertNotNull(t1);
return Uni.createFrom().completionStage(ret)
.emitOn(executor)
.map(text -> {
Assertions.assertEquals(1, SomeEntity.count());
Transaction t2;
try {
t2 = Panache.getTransactionManager().getTransaction();
} catch (SystemException e) {
throw new RuntimeException(e);
}
Assertions.assertEquals(t1, t2);
return text;
});
}
@Transactional
@GET
@Path("/transaction2-uni")
public Uni<String> transactionTest2() {
Uni<String> ret = Uni.createFrom().item("OK");
// check that the first transaction was committed
Assertions.assertEquals(1, SomeEntity.count());
// now delete our entity, but throw an exception to rollback
Assertions.assertEquals(1, SomeEntity.deleteAll());
return ret
.emitOn(executor)
.onItem().failWith(s -> new WebApplicationException(Response.status(Response.Status.CONFLICT).build()));
}
@Transactional
@GET
@Path("/transaction2-uni-cs")
public Uni<String> transactionTest2WithUniCreatedFromCS() {
Uni<String> ret = Uni.createFrom().completionStage(all.completedFuture("OK"));
// check that the first transaction was committed
Assertions.assertEquals(1, SomeOtherEntity.count());
// now delete our entity, but throw an exception to rollback
Assertions.assertEquals(1, SomeOtherEntity.deleteAll());
return ret
.emitOn(executor)
.onItem().failWith(s -> new WebApplicationException(Response.status(Response.Status.CONFLICT).build()));
}
@Transactional
@GET
@Path("/transaction3-uni")
public Uni<String> transactionTest3() {
Uni<String> ret = Uni.createFrom()
.failure(new WebApplicationException(Response.status(Response.Status.CONFLICT).build()));
// check that the second transaction was not committed
Assertions.assertEquals(1, SomeEntity.count());
// now delete our entity, but throw an exception to rollback
Assertions.assertEquals(1, SomeEntity.deleteAll());
return ret;
}
@Transactional
@GET
@Path("/transaction3-uni-cs")
public Uni<String> transactionTest3WithUniCreatedFromCS() {
CompletableFuture<String> future = all
.failedFuture(new WebApplicationException(Response.status(Response.Status.CONFLICT).build()));
Uni<String> ret = Uni.createFrom().completionStage(future);
// check that the second transaction was not committed
Assertions.assertEquals(1, SomeOtherEntity.count());
// now delete our entity, but throw an exception to rollback
Assertions.assertEquals(1, SomeOtherEntity.deleteAll());
return ret;
}
@Transactional
@GET
@Path("/transaction4")
public String transactionTest4() {
// check that the third transaction was not committed
Assertions.assertEquals(1, SomeEntity.count());
// now delete our entity
Assertions.assertEquals(1, SomeEntity.deleteAll());
return "OK";
}
@Transactional
@GET
@Path("/transaction4-cs")
public String transactionTest4CS() {
// check that the third transaction was not committed
Assertions.assertEquals(1, SomeOtherEntity.count());
// now delete our entity
Assertions.assertEquals(1, SomeOtherEntity.deleteAll());
return "OK";
}
@Transactional
@GET
@Path("/transaction-new-sync")
public Uni<String> newTransactionPropagationSynchronous() throws SystemException {
Uni<String> ret = Uni.createFrom().item("OK");
Transaction t1 = Panache.getTransactionManager().getTransaction();
Assertions.assertNotNull(t1);
txBean.doInTx();
// We should see the transaction already committed even if we're async
Assertions.assertEquals(1, Person.deleteAll());
return ret;
}
@Transactional
@GET
@Path("/transaction-new-uni")
public Uni<String> newTransactionPropagationWithUni() throws SystemException {
Person entity = new Person();
entity.name = "Stef";
entity.persist();
Transaction t1 = Panache.getTransactionManager().getTransaction();
Assertions.assertNotNull(t1);
// our entity
Assertions.assertEquals(1, Person.count());
return txBean.doInTxUni()
// this makes sure we get executed in another scheduler
.emitOn(executor)
.map(text -> {
// make sure we don't see the other transaction's entity
Transaction t2;
try {
t2 = Panache.getTransactionManager().getTransaction();
} catch (SystemException e) {
throw new RuntimeException(e);
}
Assertions.assertEquals(t1, t2);
try {
Assertions.assertEquals(Status.STATUS_ACTIVE, t2.getStatus());
} catch (SystemException e) {
throw new AssertionError(e);
}
return text;
});
}
@Transactional
@GET
@Path("/transaction-uni-2")
public Uni<String> transactionPropagationWithUni() {
Uni<String> ret = Uni.createFrom().item("OK");
// now delete both entities
Assertions.assertEquals(2, Person.deleteAll());
return ret;
}
@Transactional
@GET
@Path("/transaction-multi")
public Multi<String> transactionPropagationWithMulti() throws SystemException {
Person entity = new Person();
entity.name = "Stef";
entity.persist();
Transaction t1 = Panache.getTransactionManager().getTransaction();
Assertions.assertNotNull(t1);
// our entity
Assertions.assertEquals(1, Person.count());
return txBean.doInTxMulti()
// this makes sure we get executed in another scheduler
.emitOn(Infrastructure.getDefaultExecutor())
.map(text -> {
// make sure we don't see the other transaction's entity
Transaction t2;
try {
t2 = Panache.getTransactionManager().getTransaction();
} catch (SystemException e) {
throw new RuntimeException(e);
}
Assertions.assertEquals(t1, t2);
try {
Assertions.assertEquals(Status.STATUS_ACTIVE, t2.getStatus());
} catch (SystemException e) {
throw new AssertionError(e);
}
return text;
});
}
@Transactional
@GET
@Path("/transaction-multi-2")
public Flow.Publisher<String> transactionPropagationWithMulti2() {
Multi<String> ret = Multi.createFrom().item("OK");
// now delete both entities
Assertions.assertEquals(2, Person.deleteAll());
return ret;
}
@GET
@Path("/bug40852")
public String bug40852() {
var futureW = Uni
.createFrom()
.item("item")
.onItem()
.delayIt()
.by(Duration.ofMillis(100))
.subscribeAsCompletionStage();
futureW.whenComplete((result, error) -> {
Assertions.assertEquals(true, futureW.isDone());
}).join();
return "OK";
}
}
| MutinyContextEndpoint |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy-common/runtime/src/main/java/io/quarkus/resteasy/common/runtime/jsonb/QuarkusJsonbSerializer.java | {
"start": 1231,
"end": 3151
} | class ____ extends JsonBindingProvider {
/**
* RESTEasy can already handle these
*/
private static final Set<Class<?>> BUILTIN_DEFAULTS = new HashSet<>(
Arrays.asList(String.class, InputStream.class, FileRange.class, AsyncStreamingOutput.class, DataSource.class,
Reader.class, StreamingOutput.class, byte[].class, File.class));
@Override
public boolean isReadable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
if (BUILTIN_DEFAULTS.contains(type)) {
return false;
}
return isSupportedMediaType(mediaType);
}
@Override
public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
if (BUILTIN_DEFAULTS.contains(type)) {
return false;
}
return isSupportedMediaType(mediaType) || mediaType.equals(MediaType.APPLICATION_OCTET_STREAM_TYPE)
|| mediaType.isWildcardType();
}
@Override
public void writeTo(Object t, Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType,
MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream) throws IOException, WebApplicationException {
httpHeaders.putSingle(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON);
super.writeTo(t, type, genericType, annotations, mediaType, httpHeaders, entityStream);
}
@Override
public CompletionStage<Void> asyncWriteTo(Object t, Class<?> type, Type genericType, Annotation[] annotations,
MediaType mediaType, MultivaluedMap<String, Object> httpHeaders, AsyncOutputStream entityStream) {
httpHeaders.putSingle(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON);
return super.asyncWriteTo(t, type, genericType, annotations, mediaType, httpHeaders, entityStream);
}
}
| QuarkusJsonbSerializer |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/xml/HANAXmlTableFunction.java | {
"start": 17833,
"end": 19306
} | class ____ extends XmlTableSetReturningFunctionTypeResolver {
@Override
protected void addSelectableMapping(List<SelectableMapping> selectableMappings, String name, JdbcMapping type, SqmToSqlAstConverter converter) {
if ( isBoolean( type ) ) {
//noinspection unchecked
final JdbcLiteralFormatter<Object> jdbcLiteralFormatter = type.getJdbcLiteralFormatter();
final Dialect dialect = converter.getCreationContext().getDialect();
final WrapperOptions wrapperOptions = converter.getCreationContext().getWrapperOptions();
final Object trueValue = type.convertToRelationalValue( true );
final Object falseValue = type.convertToRelationalValue( false );
final String trueFragment = jdbcLiteralFormatter.toJdbcLiteral( trueValue, dialect, wrapperOptions );
final String falseFragment = jdbcLiteralFormatter.toJdbcLiteral( falseValue, dialect, wrapperOptions );
selectableMappings.add( new SelectableMappingImpl(
"",
name,
new SelectablePath( name ),
"case " + Template.TEMPLATE + "." + name + " when 'true' then " + trueFragment + " when 'false' then " + falseFragment + " end",
null,
"varchar(5)",
null,
null,
null,
null,
null,
false,
false,
false,
false,
false,
false,
type
));
}
else {
super.addSelectableMapping( selectableMappings, name, type, converter );
}
}
}
}
| DB2XmlTableSetReturningFunctionTypeResolver |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/dynamic/DynMethods.java | {
"start": 6709,
"end": 7174
} | class ____ may be an instance of
* @param <E> the type of exception that will be thrown if throwable is an instance
* @throws E if t is an instance of E
*/
@SuppressWarnings("unchecked")
public static <E extends Exception> void throwIfInstance(Throwable t, Class<E> excClass)
throws E {
if (excClass.isAssignableFrom(t.getClass())) {
// the throwable is already an exception, so throw it
throw (E)t;
}
}
public static final | t |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/DataSkewStreamNetworkThroughputBenchmark.java | {
"start": 1928,
"end": 2516
} | class ____ implements ChannelSelector {
private int numberOfChannels;
private int channelIndex = 0;
@Override
public void setup(int numberOfChannels) {
this.numberOfChannels = numberOfChannels;
}
@Override
public int selectChannel(IOReadableWritable record) {
if (channelIndex >= numberOfChannels) {
return 0;
}
return channelIndex++;
}
@Override
public boolean isBroadcast() {
return false;
}
}
}
| DataSkewChannelSelector |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/ondeletecascade/OnDeleteManyToManyTest.java | {
"start": 2436,
"end": 2599
} | class ____ {
@Id
long id;
boolean a;
@ManyToMany
@OnDelete(action = OnDeleteAction.CASCADE)
Set<B> bs = new HashSet<>();
}
@Entity(name = "B")
static | A |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java | {
"start": 1684,
"end": 3832
} | class ____ extends Random implements Closeable, Configurable {
public static final Logger LOG =
LoggerFactory.getLogger(OsSecureRandom.class);
private static final long serialVersionUID = 6391500337172057900L;
private transient Configuration conf;
private final int RESERVOIR_LENGTH = 8192;
private String randomDevPath;
private transient InputStream stream;
private final byte[] reservoir = new byte[RESERVOIR_LENGTH];
private int pos = reservoir.length;
private void fillReservoir(int min) {
if (pos >= reservoir.length - min) {
try {
if (stream == null) {
stream = Files.newInputStream(Paths.get(randomDevPath));
}
IOUtils.readFully(stream, reservoir, 0, reservoir.length);
} catch (IOException e) {
throw new RuntimeException("failed to fill reservoir", e);
}
pos = 0;
}
}
public OsSecureRandom() {
}
@VisibleForTesting
public boolean isClosed() {
return stream == null;
}
@Override
synchronized public void setConf(Configuration conf) {
this.conf = conf;
this.randomDevPath = conf.get(
HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY,
HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT);
close();
}
@Override
synchronized public Configuration getConf() {
return conf;
}
@Override
synchronized public void nextBytes(byte[] bytes) {
int off = 0;
int n = 0;
while (off < bytes.length) {
fillReservoir(0);
n = Math.min(bytes.length - off, reservoir.length - pos);
System.arraycopy(reservoir, pos, bytes, off, n);
off += n;
pos += n;
}
}
@Override
synchronized protected int next(int nbits) {
fillReservoir(4);
int n = 0;
for (int i = 0; i < 4; i++) {
n = ((n << 8) | (reservoir[pos++] & 0xff));
}
return n & (0xffffffff >> (32 - nbits));
}
@Override
synchronized public void close() {
if (stream != null) {
IOUtils.cleanupWithLogger(LOG, stream);
stream = null;
}
}
@Override
protected void finalize() throws Throwable {
close();
}
}
| OsSecureRandom |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchBuffer.java | {
"start": 1821,
"end": 1999
} | class ____ thread-safe with the intention that {@link ShareCompletedFetch the data} will be
* "produced" by a background thread and consumed by the application thread.
*/
public | is |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/builditem/nativeimage/ServiceProviderBuildItem.java | {
"start": 7176,
"end": 7472
} | class ____ that must already be mentioned in the file
*/
public ServiceProviderBuildItem(String serviceInterfaceClassName, String... providerClassNames) {
this(serviceInterfaceClassName, List.of(providerClassNames), false);
}
/**
* Registers the specified service | names |
java | apache__logging-log4j2 | log4j-1.2-api/src/main/java/org/apache/log4j/helpers/DateLayout.java | {
"start": 1241,
"end": 6591
} | class ____ extends Layout {
/**
* String constant designating no time information. Current value of this constant is <b>NULL</b>.
*
*/
public static final String NULL_DATE_FORMAT = "NULL";
/**
* String constant designating relative time. Current value of this constant is <b>RELATIVE</b>.
*/
public static final String RELATIVE_TIME_DATE_FORMAT = "RELATIVE";
/**
* @deprecated Options are now handled using the JavaBeans paradigm. This constant is not longer needed and will be
* removed in the <em>near</em> term.
*/
@Deprecated
public static final String DATE_FORMAT_OPTION = "DateFormat";
/**
* @deprecated Options are now handled using the JavaBeans paradigm. This constant is not longer needed and will be
* removed in the <em>near</em> term.
*/
@Deprecated
public static final String TIMEZONE_OPTION = "TimeZone";
protected FieldPosition pos = new FieldPosition(0);
private String timeZoneID;
private String dateFormatOption;
protected DateFormat dateFormat;
protected Date date = new Date();
public void activateOptions() {
setDateFormat(dateFormatOption);
if (timeZoneID != null && dateFormat != null) {
dateFormat.setTimeZone(TimeZone.getTimeZone(timeZoneID));
}
}
public void dateFormat(final StringBuffer buf, final LoggingEvent event) {
if (dateFormat != null) {
date.setTime(event.timeStamp);
dateFormat.format(date, buf, this.pos);
buf.append(' ');
}
}
/**
* Returns value of the <b>DateFormat</b> option.
*/
public String getDateFormat() {
return dateFormatOption;
}
/**
* @deprecated Use the setter method for the option directly instead of the generic <code>setOption</code> method.
*/
@Deprecated
public String[] getOptionStrings() {
return new String[] {DATE_FORMAT_OPTION, TIMEZONE_OPTION};
}
/**
* Returns value of the <b>TimeZone</b> option.
*/
public String getTimeZone() {
return timeZoneID;
}
/**
* Sets the {@link DateFormat} used to format time and date in the zone determined by <code>timeZone</code>.
*/
public void setDateFormat(final DateFormat dateFormat, final TimeZone timeZone) {
this.dateFormat = dateFormat;
this.dateFormat.setTimeZone(timeZone);
}
/**
* The value of the <b>DateFormat</b> option should be either an argument to the constructor of {@link SimpleDateFormat}
* or one of the srings "NULL", "RELATIVE", "ABSOLUTE", "DATE" or "ISO8601.
*/
public void setDateFormat(final String dateFormat) {
if (dateFormat != null) {
dateFormatOption = dateFormat;
}
setDateFormat(dateFormatOption, TimeZone.getDefault());
}
/**
* Sets the DateFormat used to format date and time in the time zone determined by <code>timeZone</code> parameter. The
* {@link DateFormat} used will depend on the <code>dateFormatType</code>.
*
* <p>
* The recognized types are {@link #NULL_DATE_FORMAT}, {@link #RELATIVE_TIME_DATE_FORMAT}
* {@link AbsoluteTimeDateFormat#ABS_TIME_DATE_FORMAT}, {@link AbsoluteTimeDateFormat#DATE_AND_TIME_DATE_FORMAT} and
* {@link AbsoluteTimeDateFormat#ISO8601_DATE_FORMAT}. If the <code>dateFormatType</code> is not one of the above, then
* the argument is assumed to be a date pattern for {@link SimpleDateFormat}.
*/
public void setDateFormat(final String dateFormatType, final TimeZone timeZone) {
if (dateFormatType == null) {
this.dateFormat = null;
return;
}
if (dateFormatType.equalsIgnoreCase(NULL_DATE_FORMAT)) {
this.dateFormat = null;
} else if (dateFormatType.equalsIgnoreCase(RELATIVE_TIME_DATE_FORMAT)) {
this.dateFormat = new RelativeTimeDateFormat();
} else if (dateFormatType.equalsIgnoreCase(AbsoluteTimeDateFormat.ABS_TIME_DATE_FORMAT)) {
this.dateFormat = new AbsoluteTimeDateFormat(timeZone);
} else if (dateFormatType.equalsIgnoreCase(AbsoluteTimeDateFormat.DATE_AND_TIME_DATE_FORMAT)) {
this.dateFormat = new DateTimeDateFormat(timeZone);
} else if (dateFormatType.equalsIgnoreCase(AbsoluteTimeDateFormat.ISO8601_DATE_FORMAT)) {
this.dateFormat = new ISO8601DateFormat(timeZone);
} else {
this.dateFormat = new SimpleDateFormat(dateFormatType);
this.dateFormat.setTimeZone(timeZone);
}
}
/**
* @deprecated Use the setter method for the option directly instead of the generic <code>setOption</code> method.
*/
@Deprecated
public void setOption(final String option, final String value) {
if (option.equalsIgnoreCase(DATE_FORMAT_OPTION)) {
dateFormatOption = toRootUpperCase(value);
} else if (option.equalsIgnoreCase(TIMEZONE_OPTION)) {
timeZoneID = value;
}
}
/**
* The <b>TimeZoneID</b> option is a time zone ID string in the format expected by the {@link TimeZone#getTimeZone}
* method.
*/
public void setTimeZone(final String timeZone) {
this.timeZoneID = timeZone;
}
}
| DateLayout |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/jdbc/Expectation.java | {
"start": 1272,
"end": 1553
} | class ____ { ... }
* </pre>
*
* @see org.hibernate.annotations.SQLInsert#verify
* @see org.hibernate.annotations.SQLUpdate#verify
* @see org.hibernate.annotations.SQLDelete#verify
* @see org.hibernate.annotations.SQLDeleteAll#verify
*
* @author Steve Ebersole
*/
public | Record |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java | {
"start": 2354,
"end": 10583
} | class ____ implements Filter {
private static final Logger LOG =
LoggerFactory.getLogger(RMWebAppFilter.class);
private Injector injector;
/**
*
*/
private static final long serialVersionUID = 1L;
// define a set of URIs which do not need to do redirection
private static final Set<String> NON_REDIRECTED_URIS = Sets.newHashSet(
"/conf", "/stacks", "/logLevel", "/logs", IsActiveServlet.PATH_SPEC,
"/jmx", "/prom");
private String path;
private boolean ahsEnabled;
private String ahsPageURLPrefix;
private static final int BASIC_SLEEP_TIME = 5;
private static final int MAX_SLEEP_TIME = 5 * 60;
private static final Random randnum = new Random();
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
@Inject
public RMWebAppFilter(Injector injector, Configuration conf) {
this.injector = injector;
InetSocketAddress sock = YarnConfiguration.useHttps(conf)
? conf.getSocketAddr(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS,
YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS,
YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT)
: conf.getSocketAddr(YarnConfiguration.RM_WEBAPP_ADDRESS,
YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS,
YarnConfiguration.DEFAULT_RM_WEBAPP_PORT);
path = sock.getHostName() + ":" + sock.getPort();
path = YarnConfiguration.useHttps(conf)
? "https://" + path
: "http://" + path;
ahsEnabled = conf.getBoolean(
YarnConfiguration.APPLICATION_HISTORY_ENABLED,
YarnConfiguration.DEFAULT_APPLICATION_HISTORY_ENABLED);
ahsPageURLPrefix = pjoin(
WebAppUtils.getHttpSchemePrefix(conf) +
WebAppUtils.getAHSWebAppURLWithoutScheme(
conf), "applicationhistory");
}
@Override
public void doFilter(ServletRequest servletRequest,
ServletResponse servletResponse, FilterChain chain) throws IOException,
ServletException {
HttpServletRequest request = (HttpServletRequest) servletRequest;
HttpServletResponse response = (HttpServletResponse) servletResponse;
response.setCharacterEncoding("UTF-8");
String htmlEscapedUri = HtmlQuoting.quoteHtmlChars(request.getRequestURI());
if (htmlEscapedUri == null) {
htmlEscapedUri = "/";
}
String uriWithQueryString =
WebAppUtils.appendQueryParams(request, htmlEscapedUri);
String htmlEscapedUriWithQueryString =
WebAppUtils.getHtmlEscapedURIWithQueryString(request);
RMWebApp rmWebApp = injector.getInstance(RMWebApp.class);
rmWebApp.checkIfStandbyRM();
if (rmWebApp.isStandby()
&& shouldRedirect(rmWebApp, htmlEscapedUri)) {
String redirectPath = rmWebApp.getRedirectPath();
if (redirectPath != null && !redirectPath.isEmpty()) {
redirectPath += uriWithQueryString;
String redirectMsg = "This is standby RM. The redirect url is: "
+ htmlEscapedUriWithQueryString;
PrintWriter out = response.getWriter();
out.println(redirectMsg);
response.setHeader("Location", redirectPath);
response.setStatus(HttpServletResponse.SC_TEMPORARY_REDIRECT);
return;
} else {
boolean doRetry = true;
String retryIntervalStr =
request.getParameter(YarnWebParams.NEXT_REFRESH_INTERVAL);
int retryInterval = 0;
if (retryIntervalStr != null) {
try {
retryInterval = Integer.parseInt(retryIntervalStr.trim());
} catch (NumberFormatException ex) {
doRetry = false;
}
}
int next = calculateExponentialTime(retryInterval);
String redirectUrl =
appendOrReplaceParamter(path + uriWithQueryString,
YarnWebParams.NEXT_REFRESH_INTERVAL + "=" + (retryInterval + 1));
if (redirectUrl == null || next > MAX_SLEEP_TIME) {
doRetry = false;
}
String redirectMsg =
doRetry ? "Can not find any active RM. Will retry in next " + next
+ " seconds." : "There is no active RM right now.";
redirectMsg += "\nHA Zookeeper Connection State: "
+ rmWebApp.getHAZookeeperConnectionState();
PrintWriter out = response.getWriter();
out.println(redirectMsg);
if (doRetry) {
response.setHeader("Refresh", next + ";url=" + redirectUrl);
response.setStatus(HttpServletResponse.SC_TEMPORARY_REDIRECT);
}
}
return;
} else if (ahsEnabled) {
String ahsRedirectUrl = ahsRedirectPath(uriWithQueryString, rmWebApp);
if(ahsRedirectUrl != null) {
response.setHeader("Location", ahsRedirectUrl);
response.setStatus(HttpServletResponse.SC_TEMPORARY_REDIRECT);
return;
}
}
chain.doFilter(request, response);
}
private String ahsRedirectPath(String uri, RMWebApp rmWebApp) {
// TODO: Commonize URL parsing code. Will be done in YARN-4642.
String redirectPath = null;
if(uri.contains("/cluster/")) {
String[] parts = uri.split("/");
if(parts.length > 3) {
RMContext context = rmWebApp.getRMContext();
String type = parts[2];
ApplicationId appId = null;
ApplicationAttemptId appAttemptId = null;
ContainerId containerId = null;
switch(type){
case "app":
try {
appId = Apps.toAppID(parts[3]);
} catch (YarnRuntimeException | NumberFormatException e) {
LOG.debug("Error parsing {} as an ApplicationId",
parts[3], e);
return redirectPath;
}
if(!context.getRMApps().containsKey(appId)) {
redirectPath = pjoin(ahsPageURLPrefix, "app", appId);
}
break;
case "appattempt":
try{
appAttemptId = ApplicationAttemptId.fromString(parts[3]);
} catch (IllegalArgumentException e) {
LOG.debug("Error parsing {} as an ApplicationAttemptId",
parts[3], e);
return redirectPath;
}
if(!context.getRMApps().containsKey(
appAttemptId.getApplicationId())) {
redirectPath = pjoin(ahsPageURLPrefix,
"appattempt", appAttemptId);
}
break;
case "container":
try {
containerId = ContainerId.fromString(parts[3]);
} catch (IllegalArgumentException e) {
LOG.debug("Error parsing {} as an ContainerId",
parts[3], e);
return redirectPath;
}
if(!context.getRMApps().containsKey(
containerId.getApplicationAttemptId().getApplicationId())) {
redirectPath = pjoin(ahsPageURLPrefix,
"container", containerId);
}
break;
default:
break;
}
}
}
return redirectPath;
}
private boolean shouldRedirect(RMWebApp rmWebApp, String uri) {
return !uri.equals("/" + rmWebApp.wsName() + "/v1/cluster/info")
&& !uri.equals("/ws/v1/cluster/info")
&& !uri.equals("/" + rmWebApp.name() + "/cluster")
&& !uri.startsWith(ProxyUriUtils.PROXY_BASE)
&& !NON_REDIRECTED_URIS.contains(uri);
}
private String appendOrReplaceParamter(String uri, String newQuery) {
if (uri.contains(YarnWebParams.NEXT_REFRESH_INTERVAL + "=")) {
return uri.replaceAll(YarnWebParams.NEXT_REFRESH_INTERVAL + "=[^&]+",
newQuery);
}
try {
URI oldUri = new URI(uri);
String appendQuery = oldUri.getQuery();
if (appendQuery == null) {
appendQuery = newQuery;
} else {
appendQuery += "&" + newQuery;
}
URI newUri =
new URI(oldUri.getScheme(), oldUri.getAuthority(), oldUri.getPath(),
appendQuery, oldUri.getFragment());
return newUri.toString();
} catch (URISyntaxException e) {
return null;
}
}
private static int calculateExponentialTime(int retries) {
long baseTime = BASIC_SLEEP_TIME * (1L << retries);
return (int) (baseTime * (randnum.nextDouble() + 0.5));
}
@Override
public void destroy() {
}
}
| RMWebAppFilter |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/bugs/DiamondInheritanceIsConfusingMockitoTest.java | {
"start": 269,
"end": 798
} | class ____ {
@Test
public void should_work() {
Sub mock = Mockito.mock(Sub.class);
// The following line results in
// org.mockito.exceptions.misusing.MissingMethodInvocationException:
// when() requires an argument which has to be 'a method call on a mock'.
// Presumably confused by the interface/superclass signatures.
Mockito.when(mock.getFoo()).thenReturn("Hello");
assertEquals("Hello", mock.getFoo());
}
public | DiamondInheritanceIsConfusingMockitoTest |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/methodgenerics/multiple/ReturnTypeHasMultipleTypeVarOneGenericMapper.java | {
"start": 379,
"end": 795
} | interface ____ {
ReturnTypeHasMultipleTypeVarOneGenericMapper INSTANCE =
Mappers.getMapper( ReturnTypeHasMultipleTypeVarOneGenericMapper.class );
Target toTarget(Source source);
default <T> HashMap<String, T> toMap( T entry) {
HashMap<String, T> result = new HashMap<>( );
result.put( "test", entry );
return result;
}
| ReturnTypeHasMultipleTypeVarOneGenericMapper |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/query/NativeQueryResultTypeAutoDiscoveryTest.java | {
"start": 20498,
"end": 20694
} | class ____ extends TestedEntity<Character> {
@Column(length = 1)
public Character getTestedProperty() {
return testedProperty;
}
}
@Entity(name = "char255Entity")
public static | CharEntity |
java | quarkusio__quarkus | integration-tests/spring-web/src/main/java/io/quarkus/it/spring/security/AlwaysFalseChecker.java | {
"start": 108,
"end": 207
} | class ____ {
public boolean check(String input) {
return false;
}
}
| AlwaysFalseChecker |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/manual/MassiveStringSorting.java | {
"start": 2160,
"end": 11793
} | class ____ {
private static final long SEED = 347569784659278346L;
public void testStringSorting() {
File input = null;
File sorted = null;
try {
// the source file
input =
generateFileWithStrings(
300000, "http://some-uri.com/that/is/a/common/prefix/to/all");
// the sorted file
sorted = File.createTempFile("sorted_strings", "txt");
String[] command = {
"/bin/bash",
"-c",
"export LC_ALL=\"C\" && cat \""
+ input.getAbsolutePath()
+ "\" | sort > \""
+ sorted.getAbsolutePath()
+ "\""
};
Process p = null;
try {
p = Runtime.getRuntime().exec(command);
int retCode = p.waitFor();
if (retCode != 0) {
throw new Exception("Command failed with return code " + retCode);
}
p = null;
} finally {
if (p != null) {
p.destroy();
}
}
// sort the data
Sorter<String> sorter = null;
BufferedReader reader = null;
BufferedReader verifyReader = null;
MemoryManager mm = null;
try (IOManager ioMan = new IOManagerAsync()) {
mm = MemoryManagerBuilder.newBuilder().setMemorySize(1024 * 1024).build();
TypeSerializer<String> serializer = StringSerializer.INSTANCE;
TypeComparator<String> comparator = new StringComparator(true);
reader = new BufferedReader(new FileReader(input));
MutableObjectIterator<String> inputIterator =
new StringReaderMutableObjectIterator(reader);
sorter =
ExternalSorter.newBuilder(mm, new DummyInvokable(), serializer, comparator)
.maxNumFileHandles(4)
.enableSpilling(ioMan, 0.8f)
.memoryFraction(1.0)
.objectReuse(false)
.largeRecords(true)
.build(inputIterator);
MutableObjectIterator<String> sortedData = sorter.getIterator();
reader.close();
// verify
verifyReader = new BufferedReader(new FileReader(sorted));
String next;
while ((next = verifyReader.readLine()) != null) {
String nextFromStratoSort = sortedData.next("");
Assert.assertNotNull(nextFromStratoSort);
Assert.assertEquals(next, nextFromStratoSort);
}
} finally {
if (reader != null) {
reader.close();
}
if (verifyReader != null) {
verifyReader.close();
}
if (sorter != null) {
sorter.close();
}
if (mm != null) {
mm.shutdown();
}
}
} catch (Exception e) {
System.err.println(e.getMessage());
e.printStackTrace();
Assert.fail(e.getMessage());
} finally {
if (input != null) {
input.delete();
}
if (sorted != null) {
sorted.delete();
}
}
}
@SuppressWarnings("unchecked")
public void testStringTuplesSorting() {
final int numStrings = 300000;
File input = null;
File sorted = null;
try {
// the source file
input =
generateFileWithStringTuples(
numStrings, "http://some-uri.com/that/is/a/common/prefix/to/all");
// the sorted file
sorted = File.createTempFile("sorted_strings", "txt");
String[] command = {
"/bin/bash",
"-c",
"export LC_ALL=\"C\" && cat \""
+ input.getAbsolutePath()
+ "\" | sort > \""
+ sorted.getAbsolutePath()
+ "\""
};
Process p = null;
try {
p = Runtime.getRuntime().exec(command);
int retCode = p.waitFor();
if (retCode != 0) {
throw new Exception("Command failed with return code " + retCode);
}
p = null;
} finally {
if (p != null) {
p.destroy();
}
}
// sort the data
Sorter<Tuple2<String, String[]>> sorter = null;
BufferedReader reader = null;
BufferedReader verifyReader = null;
MemoryManager mm = null;
try (IOManager ioMan = new IOManagerAsync()) {
mm = MemoryManagerBuilder.newBuilder().setMemorySize(1024 * 1024).build();
TupleTypeInfo<Tuple2<String, String[]>> typeInfo =
(TupleTypeInfo<Tuple2<String, String[]>>)
new TypeHint<Tuple2<String, String[]>>() {}.getTypeInfo();
TypeSerializer<Tuple2<String, String[]>> serializer =
typeInfo.createSerializer(new SerializerConfigImpl());
TypeComparator<Tuple2<String, String[]>> comparator =
typeInfo.createComparator(
new int[] {0}, new boolean[] {true}, 0, new ExecutionConfig());
reader = new BufferedReader(new FileReader(input));
MutableObjectIterator<Tuple2<String, String[]>> inputIterator =
new StringTupleReaderMutableObjectIterator(reader);
sorter =
ExternalSorter.newBuilder(mm, new DummyInvokable(), serializer, comparator)
.maxNumFileHandles(4)
.enableSpilling(ioMan, 0.8f)
.memoryFraction(1.0)
.objectReuse(false)
.largeRecords(true)
.build(inputIterator);
// use this part to verify that all if good when sorting in memory
// List<MemorySegment> memory = mm.allocatePages(new DummyInvokable(),
// mm.computeNumberOfPages(1024*1024*1024));
// NormalizedKeySorter<Tuple2<String, String[]>> nks = new
// NormalizedKeySorter<Tuple2<String,String[]>>(serializer, comparator, memory);
//
// {
// Tuple2<String, String[]> wi = new Tuple2<String, String[]>("", new
// String[0]);
// while ((wi = inputIterator.next(wi)) != null) {
// Assert.assertTrue(nks.write(wi));
// }
//
// new QuickSort().sort(nks);
// }
//
// MutableObjectIterator<Tuple2<String, String[]>> sortedData =
// nks.getIterator();
MutableObjectIterator<Tuple2<String, String[]>> sortedData = sorter.getIterator();
reader.close();
// verify
verifyReader = new BufferedReader(new FileReader(sorted));
MutableObjectIterator<Tuple2<String, String[]>> verifyIterator =
new StringTupleReaderMutableObjectIterator(verifyReader);
Tuple2<String, String[]> next = new Tuple2<String, String[]>("", new String[0]);
Tuple2<String, String[]> nextFromStratoSort =
new Tuple2<String, String[]>("", new String[0]);
int num = 0;
while ((next = verifyIterator.next(next)) != null) {
num++;
nextFromStratoSort = sortedData.next(nextFromStratoSort);
Assert.assertNotNull(nextFromStratoSort);
Assert.assertEquals(next.f0, nextFromStratoSort.f0);
Assert.assertArrayEquals(next.f1, nextFromStratoSort.f1);
}
Assert.assertNull(sortedData.next(nextFromStratoSort));
Assert.assertEquals(numStrings, num);
} finally {
if (reader != null) {
reader.close();
}
if (verifyReader != null) {
verifyReader.close();
}
if (sorter != null) {
sorter.close();
}
if (mm != null) {
mm.shutdown();
}
}
} catch (Exception e) {
System.err.println(e.getMessage());
e.printStackTrace();
Assert.fail(e.getMessage());
} finally {
if (input != null) {
input.delete();
}
if (sorted != null) {
sorted.delete();
}
}
}
// --------------------------------------------------------------------------------------------
private static final | MassiveStringSorting |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/FutureTransformAsyncTest.java | {
"start": 3894,
"end": 4949
} | class ____ {
private Executor executor;
ListenableFuture<String> test() {
ListenableFuture<String> future =
Futures.transform(
Futures.immediateFuture(5),
value -> {
if (value > 5) {
return "large";
} else if (value < 5) {
return "small";
}
return "value: " + value;
},
executor);
return future;
}
}
""")
.doTest();
}
@Test
public void transformAsync_notAllImmediateFutures() {
compilationHelper
.addSourceLines(
"in/Test.java",
"""
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.Executor;
| Test |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/cluster/PartitionSelectorException.java | {
"start": 282,
"end": 820
} | class ____ extends PartitionException {
private final Partitions partitions;
/**
* Create a {@code UnknownPartitionException} with the specified detail message.
*
* @param msg the detail message.
* @param partitions read-only view of the current topology view.
*/
public PartitionSelectorException(String msg, Partitions partitions) {
super(msg);
this.partitions = partitions;
}
public Partitions getPartitions() {
return partitions;
}
}
| PartitionSelectorException |
java | apache__camel | components/camel-flatpack/src/test/java/org/apache/camel/component/flatpack/XMLNoSplitRowsTest.java | {
"start": 1550,
"end": 4470
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(XMLNoSplitRowsTest.class);
@EndpointInject("mock:results")
protected MockEndpoint results;
protected String[] expectedFirstName = { "JOHN", "JIMMY", "JANE", "FRED" };
@Test
public void testHeaderAndTrailer() throws Exception {
results.expectedMessageCount(1);
results.message(0).body().isInstanceOf(Document.class);
results.message(0).header("camelFlatpackCounter").isEqualTo(6);
results.assertIsSatisfied();
Document data = results.getExchanges().get(0).getIn().getBody(Document.class);
Element docElement = data.getDocumentElement();
assertEquals("Dataset", docElement.getTagName());
// assert header
Element header = (Element) docElement.getElementsByTagName("DatasetHeader").item(0);
NodeList headerNodes = header.getElementsByTagName("Column");
for (int i = 0; i < headerNodes.getLength(); i++) {
Element column = (Element) headerNodes.item(i);
if (column.getAttribute("name").equals("INDICATOR")) {
assertEquals("HBT", column.getTextContent());
} else if (column.getAttribute("name").equals("DATE")) {
assertEquals("20080817", column.getTextContent());
} else {
fail("Invalid Header Field");
}
}
// assert body
NodeList list = docElement.getElementsByTagName("DatasetRecord");
for (int counter = 0; counter < list.getLength(); counter++) {
Element record = (Element) list.item(counter);
NodeList columnNodes = record.getElementsByTagName("Column");
boolean firstNameFound = false;
for (int i = 0; i < columnNodes.getLength(); i++) {
Element column = (Element) columnNodes.item(i);
if (column.getAttribute("name").equals("FIRSTNAME")) {
assertEquals(expectedFirstName[counter], column.getTextContent());
firstNameFound = true;
}
}
assertTrue(firstNameFound);
LOG.info("Result: {} = {}", counter, record);
}
// assert trailer
Element trailer = (Element) docElement.getElementsByTagName("DatasetTrailer").item(0);
NodeList trailerNodes = trailer.getElementsByTagName("Column");
for (int i = 0; i < trailerNodes.getLength(); i++) {
Element column = (Element) trailerNodes.item(i);
if (column.getAttribute("name").equals("INDICATOR")) {
assertEquals("FBT", column.getTextContent());
} else if (column.getAttribute("name").equals("STATUS")) {
assertEquals("SUCCESS", column.getTextContent());
} else {
fail("Invalid Trailer Field");
}
}
}
}
| XMLNoSplitRowsTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java | {
"start": 2378,
"end": 5688
} | class ____ extends FsUsage {
public static final String NAME = "df";
public static final String USAGE = "[-h] [<path> ...]";
public static final String DESCRIPTION =
"Shows the capacity, free and used space of the filesystem. "+
"If the filesystem has multiple partitions, and no path to a " +
"particular partition is specified, then the status of the root " +
"partitions will be shown.\n" +
"-h: Formats the sizes of files in a human-readable fashion " +
"rather than a number of bytes.";
@Override
protected void processOptions(LinkedList<String> args)
throws IOException {
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "h");
cf.parse(args);
setHumanReadable(cf.getOpt("h"));
if (args.isEmpty()) args.add(Path.SEPARATOR);
}
@Override
protected void processArguments(LinkedList<PathData> args)
throws IOException {
setUsagesTable(new TableBuilder(
"Filesystem", "Size", "Used", "Available", "Use%", "Mounted on"));
getUsagesTable().setRightAlign(1, 2, 3, 4);
super.processArguments(args);
if (!getUsagesTable().isEmpty()) {
getUsagesTable().printToStream(out);
}
}
/**
* Add a new row to the usages table for the given FileSystem URI.
*
* @param uri - FileSystem URI
* @param fsStatus - FileSystem status
* @param mountedOnPath - FileSystem mounted on path
*/
private void addToUsagesTable(URI uri, FsStatus fsStatus,
String mountedOnPath) {
long size = fsStatus.getCapacity();
long used = fsStatus.getUsed();
long free = fsStatus.getRemaining();
getUsagesTable().addRow(
uri,
formatSize(size),
formatSize(used),
formatSize(free),
StringUtils.formatPercent((double) used / (double) size, 0),
mountedOnPath
);
}
@Override
protected void processPath(PathData item) throws IOException {
if (ViewFileSystemUtil.isViewFileSystem(item.fs)
|| ViewFileSystemUtil.isViewFileSystemOverloadScheme(item.fs)) {
ViewFileSystem viewFileSystem = (ViewFileSystem) item.fs;
Map<ViewFileSystem.MountPoint, FsStatus> fsStatusMap =
ViewFileSystemUtil.getStatus(viewFileSystem, item.path);
for (Map.Entry<ViewFileSystem.MountPoint, FsStatus> entry :
fsStatusMap.entrySet()) {
ViewFileSystem.MountPoint viewFsMountPoint = entry.getKey();
FsStatus fsStatus = entry.getValue();
// Add the viewfs mount point status to report
URI[] mountPointFileSystemURIs =
viewFsMountPoint.getTargetFileSystemURIs();
// Since LinkMerge is not supported yet, we
// should ideally see mountPointFileSystemURIs
// array with only one element.
addToUsagesTable(mountPointFileSystemURIs[0],
fsStatus, viewFsMountPoint.getMountedOnPath().toString());
}
} else {
// Hide the columns specific to ViewFileSystem
getUsagesTable().setColumnHide(5, true);
FsStatus fsStatus = item.fs.getStatus(item.path);
addToUsagesTable(item.fs.getUri(), fsStatus, "/");
}
}
}
/** show disk usage */
public static | Df |
java | spring-projects__spring-security | test/src/test/java/org/springframework/security/test/context/showcase/WithMockUserParentTests.java | {
"start": 1762,
"end": 2150
} | class ____ extends WithMockUserParent {
@Autowired
private MessageService messageService;
@Test
public void getMessageWithMockUser() {
String message = this.messageService.getMessage();
assertThat(message).contains("user");
}
@Configuration
@EnableMethodSecurity
@EnableWebSecurity
@ComponentScan(basePackageClasses = HelloMessageService.class)
static | WithMockUserParentTests |
java | google__dagger | javatests/dagger/internal/codegen/DuplicateBindingsValidationTest.java | {
"start": 29695,
"end": 29813
} | interface ____ {",
" C.Builder c();",
"",
" @Subcomponent.Builder",
" | B |
java | spring-projects__spring-framework | spring-expression/src/main/java/org/springframework/expression/spel/ast/ValueRef.java | {
"start": 2663,
"end": 3308
} | class ____ implements ValueRef {
private final TypedValue typedValue;
private final SpelNodeImpl node; // used only for error reporting
public TypedValueHolderValueRef(TypedValue typedValue, SpelNodeImpl node) {
this.typedValue = typedValue;
this.node = node;
}
@Override
public TypedValue getValue() {
return this.typedValue;
}
@Override
public void setValue(@Nullable Object newValue) {
throw new SpelEvaluationException(
this.node.getStartPosition(), SpelMessage.NOT_ASSIGNABLE, this.node.toStringAST());
}
@Override
public boolean isWritable() {
return false;
}
}
}
| TypedValueHolderValueRef |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/referencearray/AtomicReferenceArrayAssert_containsSubSequence_Test.java | {
"start": 884,
"end": 1290
} | class ____ extends AtomicReferenceArrayAssertBaseTest {
@Override
protected AtomicReferenceArrayAssert<Object> invoke_api_method() {
return assertions.containsSubsequence("Luke", "Yoda");
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertContainsSubsequence(info(), internalArray(), array("Luke", "Yoda"));
}
}
| AtomicReferenceArrayAssert_containsSubSequence_Test |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java | {
"start": 5696,
"end": 29715
} | enum ____ {
OS_TYPE_LINUX,
OS_TYPE_WIN,
OS_TYPE_SOLARIS,
OS_TYPE_MAC,
OS_TYPE_FREEBSD,
OS_TYPE_OTHER
}
/**
* Get the type of the operating system, as determined from parsing
* the <code>os.name</code> property.
*/
public static final OSType osType = getOSType();
private static OSType getOSType() {
String osName = System.getProperty("os.name");
if (osName.startsWith("Windows")) {
return OSType.OS_TYPE_WIN;
} else if (osName.contains("SunOS") || osName.contains("Solaris")) {
return OSType.OS_TYPE_SOLARIS;
} else if (osName.contains("Mac")) {
return OSType.OS_TYPE_MAC;
} else if (osName.contains("FreeBSD")) {
return OSType.OS_TYPE_FREEBSD;
} else if (osName.startsWith("Linux")) {
return OSType.OS_TYPE_LINUX;
} else {
// Some other form of Unix
return OSType.OS_TYPE_OTHER;
}
}
// Helper static vars for each platform
public static final boolean WINDOWS = (osType == OSType.OS_TYPE_WIN);
public static final boolean SOLARIS = (osType == OSType.OS_TYPE_SOLARIS);
public static final boolean MAC = (osType == OSType.OS_TYPE_MAC);
public static final boolean FREEBSD = (osType == OSType.OS_TYPE_FREEBSD);
public static final boolean LINUX = (osType == OSType.OS_TYPE_LINUX);
public static final boolean OTHER = (osType == OSType.OS_TYPE_OTHER);
public static final boolean PPC_64
= System.getProperties().getProperty("os.arch").contains("ppc64");
/**
* a Unix command to get the current user's groups list.
*
* @return group command array.
*/
public static String[] getGroupsCommand() {
return (WINDOWS)? new String[]{"cmd", "/c", "groups"}
: new String[]{"groups"};
}
/**
* A command to get a given user's groups list.
* If the OS is not WINDOWS, the command will get the user's primary group
* first and finally get the groups list which includes the primary group.
* i.e. the user's primary group will be included twice.
*
* @param user user.
* @return groups for user command.
*/
public static String[] getGroupsForUserCommand(final String user) {
//'groups username' command return is inconsistent across different unixes
if (WINDOWS) {
return new String[]
{getWinUtilsPath(), "groups", "-F", "\"" + user + "\""};
} else {
String quotedUser = bashQuote(user);
return new String[] {"bash", "-c", "id -gn " + quotedUser +
"; id -Gn " + quotedUser};
}
}
/**
* A command to get a given user's group id list.
* The command will get the user's primary group
* first and finally get the groups list which includes the primary group.
* i.e. the user's primary group will be included twice.
* This command does not support Windows and will only return group names.
*
* @param user user.
* @return groups id for user command.
*/
public static String[] getGroupsIDForUserCommand(final String user) {
//'groups username' command return is inconsistent across different unixes
if (WINDOWS) {
return new String[]{getWinUtilsPath(), "groups", "-F", "\"" + user +
"\""};
} else {
String quotedUser = bashQuote(user);
return new String[] {"bash", "-c", "id -g " + quotedUser + "; id -G " +
quotedUser};
}
}
/**
* A command to get a given netgroup's user list.
*
* @param netgroup net group.
* @return users for net group command.
*/
public static String[] getUsersForNetgroupCommand(final String netgroup) {
//'groups username' command return is non-consistent across different unixes
return new String[] {"getent", "netgroup", netgroup};
}
/**
* Return a command to get permission information.
*
* @return permission command.
*/
public static String[] getGetPermissionCommand() {
return (WINDOWS) ? new String[] { getWinUtilsPath(), "ls", "-F" }
: new String[] { "ls", "-ld" };
}
/**
* Return a command to set permission.
*
* @param perm permission.
* @param recursive recursive.
* @return set permission command.
*/
public static String[] getSetPermissionCommand(String perm, boolean recursive) {
if (recursive) {
return (WINDOWS) ?
new String[] { getWinUtilsPath(), "chmod", "-R", perm }
: new String[] { "chmod", "-R", perm };
} else {
return (WINDOWS) ?
new String[] { getWinUtilsPath(), "chmod", perm }
: new String[] { "chmod", perm };
}
}
/**
* Return a command to set permission for specific file.
*
* @param perm String permission to set
* @param recursive boolean true to apply to all sub-directories recursively
* @param file String file to set
* @return String[] containing command and arguments
*/
public static String[] getSetPermissionCommand(String perm,
boolean recursive,
String file) {
String[] baseCmd = getSetPermissionCommand(perm, recursive);
String[] cmdWithFile = Arrays.copyOf(baseCmd, baseCmd.length + 1);
cmdWithFile[cmdWithFile.length - 1] = file;
return cmdWithFile;
}
/**
* Return a command to set owner.
*
* @param owner owner.
* @return set owner command.
*/
public static String[] getSetOwnerCommand(String owner) {
return (WINDOWS) ?
new String[] { getWinUtilsPath(), "chown", "\"" + owner + "\"" }
: new String[] { "chown", owner };
}
/**
* Return a command to create symbolic links.
*
* @param target target.
* @param link link.
* @return symlink command.
*/
public static String[] getSymlinkCommand(String target, String link) {
return WINDOWS ?
new String[] { getWinUtilsPath(), "symlink", link, target }
: new String[] { "ln", "-s", target, link };
}
/**
* Return a command to read the target of the a symbolic link.
*
* @param link link.
* @return read link command.
*/
public static String[] getReadlinkCommand(String link) {
return WINDOWS ?
new String[] { getWinUtilsPath(), "readlink", link }
: new String[] { "readlink", link };
}
/**
* Return a command for determining if process with specified pid is alive.
* @param pid process ID
* @return a <code>kill -0</code> command or equivalent
*/
public static String[] getCheckProcessIsAliveCommand(String pid) {
return getSignalKillCommand(0, pid);
}
/**
* Return a command to send a signal to a given pid.
*
* @param code code.
* @param pid pid.
* @return signal kill command.
*/
public static String[] getSignalKillCommand(int code, String pid) {
// Code == 0 means check alive
if (Shell.WINDOWS) {
if (0 == code) {
return new String[] {Shell.getWinUtilsPath(), "task", "isAlive", pid };
} else {
return new String[] {Shell.getWinUtilsPath(), "task", "kill", pid };
}
}
// Use the bash-builtin instead of the Unix kill command (usually
// /bin/kill) as the bash-builtin supports "--" in all Hadoop supported
// OSes.
final String quotedPid = bashQuote(pid);
if (isSetsidAvailable) {
return new String[] { "bash", "-c", "kill -" + code + " -- -" +
quotedPid };
} else {
return new String[] { "bash", "-c", "kill -" + code + " " +
quotedPid };
}
}
/** Regular expression for environment variables: {@value}. */
public static final String ENV_NAME_REGEX = "[A-Za-z_][A-Za-z0-9_]*";
/**
* Return a regular expression string that match environment variables.
*
* @return environment variable regex.
*/
public static String getEnvironmentVariableRegex() {
return (WINDOWS)
? "%(" + ENV_NAME_REGEX + "?)%"
: "\\$(" + ENV_NAME_REGEX + ")";
}
/**
* Returns a File referencing a script with the given basename, inside the
* given parent directory. The file extension is inferred by platform:
* <code>".cmd"</code> on Windows, or <code>".sh"</code> otherwise.
*
* @param parent File parent directory
* @param basename String script file basename
* @return File referencing the script in the directory
*/
public static File appendScriptExtension(File parent, String basename) {
return new File(parent, appendScriptExtension(basename));
}
/**
* Returns a script file name with the given basename.
*
* The file extension is inferred by platform:
* <code>".cmd"</code> on Windows, or <code>".sh"</code> otherwise.
*
* @param basename String script file basename
* @return String script file name
*/
public static String appendScriptExtension(String basename) {
return basename + (WINDOWS ? ".cmd" : ".sh");
}
/**
 * Returns a command to run the given script. The script interpreter is
 * inferred by platform: cmd on Windows or bash otherwise.
 *
 * <p>NOTE(review): on the non-Windows branch the absolute path is passed
 * through {@code bashQuote} even though it is supplied as a separate argv
 * element rather than embedded in a shell string; presumably bash strips
 * the quoting when interpreting its first operand — confirm against
 * {@code bashQuote}'s contract before changing.
 *
 * @param script File script to run
 * @return String[] command to run the script
 */
public static String[] getRunScriptCommand(File script) {
  String absolutePath = script.getAbsolutePath();
  return WINDOWS ?
    new String[] {"cmd", "/c", absolutePath }
    : new String[] {"bash", bashQuote(absolutePath) };
}
/** a Unix command to set permission: {@value}. */
public static final String SET_PERMISSION_COMMAND = "chmod";
/** a Unix command to set owner: {@value}. */
public static final String SET_OWNER_COMMAND = "chown";
/** a Unix command to change a file's group ownership: {@value}. */
public static final String SET_GROUP_COMMAND = "chgrp";
/** a Unix command to create a link: {@value}. */
public static final String LINK_COMMAND = "ln";
/** a Unix command to get a link target: {@value}. */
public static final String READ_LINK_COMMAND = "readlink";
/** Time after which the executing script is considered timed out; 0 means no timeout. */
protected long timeOutInterval = 0L;
/** Whether the script timed out. */
private final AtomicBoolean timedOut = new AtomicBoolean(false);
/** Indicates whether the parent process's env vars should be inherited. */
protected boolean inheritParentEnv = true;
/**
 * Centralized logic to discover and validate the sanity of the Hadoop
 * home directory.
 *
 * This does a lot of work so it should only be called
 * privately for initialization once per process.
 *
 * @return A directory that exists and via was specified on the command line
 * via <code>-Dhadoop.home.dir</code> or the <code>HADOOP_HOME</code>
 * environment variable.
 * @throws FileNotFoundException if the properties are absent or the specified
 * path is not a reference to a valid directory.
 */
private static File checkHadoopHome() throws FileNotFoundException {
  // The JVM-scoped system property takes precedence over the
  // system/user-global environment variable.
  final String sysprop = System.getProperty(SYSPROP_HADOOP_HOME_DIR);
  final String home =
      (sysprop != null) ? sysprop : System.getenv(ENV_HADOOP_HOME);
  return checkHadoopHomeInner(home);
}
/*
 A set of exception strings used to construct error messages;
 these are referred to in tests, so their values must stay stable.
 */
static final String E_DOES_NOT_EXIST = "does not exist";
static final String E_IS_RELATIVE = "is not an absolute path.";
static final String E_NOT_DIRECTORY = "is not a directory.";
static final String E_NO_EXECUTABLE = "Could not locate Hadoop executable";
static final String E_NOT_EXECUTABLE_FILE = "Not an executable file";
static final String E_HADOOP_PROPS_UNSET = ENV_HADOOP_HOME + " and "
    + SYSPROP_HADOOP_HOME_DIR + " are unset.";
static final String E_HADOOP_PROPS_EMPTY = ENV_HADOOP_HOME + " or "
    + SYSPROP_HADOOP_HOME_DIR + " set to an empty string";
static final String E_NOT_A_WINDOWS_SYSTEM = "Not a Windows system";
/**
 * Validate the accessibility of the Hadoop home directory.
 *
 * @return A directory that is expected to be the hadoop home directory
 * @throws FileNotFoundException if the specified
 * path is not a reference to a valid directory.
 */
@VisibleForTesting
static File checkHadoopHomeInner(String home) throws FileNotFoundException {
  // Neither the system property nor the env var supplied a value.
  if (home == null) {
    throw new FileNotFoundException(E_HADOOP_PROPS_UNSET);
  }
  // Strip any number of leading and trailing double quotes.
  String stripped = home;
  while (stripped.startsWith("\"")) {
    stripped = stripped.substring(1);
  }
  while (stripped.endsWith("\"")) {
    stripped = stripped.substring(0, stripped.length() - 1);
  }
  // After stripping quotes the value must be non-empty.
  if (stripped.isEmpty()) {
    throw new FileNotFoundException(E_HADOOP_PROPS_EMPTY);
  }
  // The value must be an absolute reference to an existing directory.
  final File homedir = new File(stripped);
  String problem = null;
  if (!homedir.isAbsolute()) {
    problem = E_IS_RELATIVE;
  } else if (!homedir.exists()) {
    problem = E_DOES_NOT_EXIST;
  } else if (!homedir.isDirectory()) {
    problem = E_NOT_DIRECTORY;
  }
  if (problem != null) {
    throw new FileNotFoundException("Hadoop home directory " + homedir
        + " " + problem);
  }
  return homedir;
}
/**
 * The Hadoop home directory; null when detection failed (see
 * {@link #HADOOP_HOME_DIR_FAILURE_CAUSE}).
 */
private static final File HADOOP_HOME_FILE;
/**
 * Rethrowable cause for the failure to determine the hadoop
 * home directory; null when {@link #HADOOP_HOME_FILE} is valid.
 */
private static final IOException HADOOP_HOME_DIR_FAILURE_CAUSE;
static {
  // Invariant: exactly one of (home, ex) is non-null after this block,
  // so later lookups can either return the dir or rethrow the cause.
  File home;
  IOException ex;
  try {
    home = checkHadoopHome();
    ex = null;
  } catch (IOException ioe) {
    // Failure is expected on hosts without a Hadoop install; record the
    // cause and defer raising it until someone actually asks for the dir.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Failed to detect a valid hadoop home directory", ioe);
    }
    ex = ioe;
    home = null;
  }
  HADOOP_HOME_FILE = home;
  HADOOP_HOME_DIR_FAILURE_CAUSE = ex;
}
/**
 * Optionally extend an error message with some OS-specific text.
 * @param message core error message
 * @return error message, possibly with some extra text
 */
private static String addOsText(String message) {
  if (!WINDOWS) {
    return message;
  }
  // Point Windows users at the winutils documentation.
  return message + " -see " + WINDOWS_PROBLEMS;
}
/**
 * Create a {@code FileNotFoundException} with the inner nested cause set
 * to the given exception. Compensates for the fact that FNFE doesn't
 * have an initializer that takes an exception.
 * @param text error text
 * @param ex inner exception
 * @return a new exception to throw.
 */
private static FileNotFoundException fileNotFoundException(String text,
    Exception ex) {
  final FileNotFoundException fnfe = new FileNotFoundException(text);
  // initCause returns the same instance; the explicit statement form
  // avoids the cast that chaining would require.
  fnfe.initCause(ex);
  return fnfe;
}
/**
 * Get the Hadoop home directory. Raises an exception if not found
 * @return the home dir
 * @throws IOException if the home directory cannot be located.
 */
public static String getHadoopHome() throws IOException {
  final File home = getHadoopHomeDir();
  return home.getCanonicalPath();
}
/**
 * Get the Hadoop home directory. If it is invalid,
 * throw an exception.
 * @return a path referring to hadoop home.
 * @throws FileNotFoundException if the directory doesn't exist.
 */
private static File getHadoopHomeDir() throws FileNotFoundException {
  final IOException cause = HADOOP_HOME_DIR_FAILURE_CAUSE;
  if (cause == null) {
    return HADOOP_HOME_FILE;
  }
  // Re-raise the cached startup failure with OS-specific guidance.
  throw fileNotFoundException(addOsText(cause.toString()), cause);
}
/**
 * Fully qualify the path to a binary that should be in a known hadoop
 * bin location. This is primarily useful for disambiguating call-outs
 * to executable sub-components of Hadoop to avoid clashes with other
 * executables that may be in the path. Caveat: this call doesn't
 * just format the path to the bin directory. It also checks for file
 * existence of the composed path. The output of this call should be
 * cached by callers.
 *
 * @param executable executable
 * @return executable file reference
 * @throws FileNotFoundException if the path does not exist
 */
public static File getQualifiedBin(String executable)
    throws FileNotFoundException {
  // construct hadoop bin path to the specified executable;
  // getHadoopHomeDir() rethrows any cached home-dir detection failure.
  return getQualifiedBinInner(getHadoopHomeDir(), executable);
}
/**
 * Inner logic of {@link #getQualifiedBin(String)}, accessible
 * for tests.
 * @param hadoopHomeDir home directory (assumed to be valid)
 * @param executable executable
 * @return path to the binary
 * @throws FileNotFoundException if the executable was not found/valid
 */
static File getQualifiedBinInner(File hadoopHomeDir, String executable)
    throws FileNotFoundException {
  final String binDirText = "Hadoop bin directory ";
  final File bin = new File(hadoopHomeDir, "bin");
  // Validate the bin directory first, then the executable inside it,
  // surfacing the most specific failure in each case.
  String error = null;
  if (!bin.exists()) {
    error = binDirText + E_DOES_NOT_EXIST + ": " + bin;
  } else if (!bin.isDirectory()) {
    error = binDirText + E_NOT_DIRECTORY + ": " + bin;
  }
  if (error != null) {
    throw new FileNotFoundException(addOsText(error));
  }
  final File exeFile = new File(bin, executable);
  if (!exeFile.exists()) {
    error = E_NO_EXECUTABLE + ": " + exeFile;
  } else if (!exeFile.isFile()) {
    error = E_NOT_EXECUTABLE_FILE + ": " + exeFile;
  }
  if (error != null) {
    throw new FileNotFoundException(addOsText(error));
  }
  try {
    return exeFile.getCanonicalFile();
  } catch (IOException e) {
    // this isn't going to happen, because of all the upfront checks.
    // so if it does, it gets converted to a FNFE and rethrown
    throw fileNotFoundException(e.toString(), e);
  }
}
/**
 * Fully qualify the path to a binary that should be in a known hadoop
 * bin location. This is primarily useful for disambiguating call-outs
 * to executable sub-components of Hadoop to avoid clashes with other
 * executables that may be in the path. Caveat: this call doesn't
 * just format the path to the bin directory. It also checks for file
 * existence of the composed path. The output of this call should be
 * cached by callers.
 *
 * @param executable executable
 * @return executable file reference
 * @throws FileNotFoundException if the path does not exist
 * @throws IOException on path canonicalization failures
 */
public static String getQualifiedBinPath(String executable)
    throws IOException {
  // String-returning convenience wrapper around getQualifiedBin().
  return getQualifiedBin(executable).getCanonicalPath();
}
/**
 * Location of winutils as a string; null if not found.
 * <p>
 * <i>Important: caller must check for this value being null</i>.
 * The lack of such checks has led to many support issues being raised.
 * <p>
 * @deprecated use one of the exception-raising getter methods,
 * specifically {@link #getWinUtilsPath()} or {@link #getWinUtilsFile()}
 */
@Deprecated
public static final String WINUTILS;
/** Canonical path to winutils, private to Shell; null when unavailable. */
private static final String WINUTILS_PATH;
/** file reference to winutils; null when unavailable. */
private static final File WINUTILS_FILE;
/** the exception raised on a failure to init the WINUTILS fields; null on success. */
private static final IOException WINUTILS_FAILURE;
/*
 * Static WINUTILS_* field initializer.
 * On non-Windows systems sets the paths to null, and
 * adds a specific exception to the failure cause, so
 * that on any attempt to resolve the paths will raise
 * a meaningful exception.
 */
static {
  IOException ioe = null;
  String path = null;
  File file = null;
  // invariant: either there's a valid file and path,
  // or there is a cached IO exception.
  if (WINDOWS) {
    try {
      file = getQualifiedBin(WINUTILS_EXE);
      path = file.getCanonicalPath();
      ioe = null;
    } catch (IOException e) {
      // a missing winutils is common on misconfigured Windows hosts;
      // warn once here and defer the failure to the getters.
      LOG.warn("Did not find {}: {}", WINUTILS_EXE, e);
      // stack trace comes at debug level
      LOG.debug("Failed to find " + WINUTILS_EXE, e);
      file = null;
      path = null;
      ioe = e;
    }
  } else {
    // on a non-windows system, the invariant is kept
    // by adding an explicit exception.
    ioe = new FileNotFoundException(E_NOT_A_WINDOWS_SYSTEM);
  }
  WINUTILS_PATH = path;
  WINUTILS_FILE = file;
  WINUTILS = path;
  WINUTILS_FAILURE = ioe;
}
/**
 * Predicate to indicate whether or not the path to winutils is known.
 *
 * If true, then {@link #WINUTILS} is non-null, and both
 * {@link #getWinUtilsPath()} and {@link #getWinUtilsFile()}
 * will successfully return this value. Always false on non-windows systems.
 * @return true if there is a valid path to the binary
 */
public static boolean hasWinutilsPath() {
  // WINUTILS_PATH is only non-null when the static init succeeded.
  return WINUTILS_PATH != null;
}
/**
 * Locate the winutils binary, or fail with a meaningful
 * exception and stack trace as an RTE.
 * This method is for use in methods which don't explicitly throw
 * an <code>IOException</code>.
 * @return the path to {@link #WINUTILS_EXE}
 * @throws RuntimeException if the path is not resolvable
 */
public static String getWinUtilsPath() {
  if (WINUTILS_FAILURE != null) {
    // Wrap the cached failure so callers without a throws-clause
    // still get the original cause and message.
    throw new RuntimeException(WINUTILS_FAILURE.toString(),
        WINUTILS_FAILURE);
  }
  return WINUTILS_PATH;
}
/**
 * Get a file reference to winutils.
 * Always raises an exception if there isn't one
 * @return the file instance referring to the winutils bin.
 * @throws FileNotFoundException on any failure to locate that file.
 */
public static File getWinUtilsFile() throws FileNotFoundException {
  if (WINUTILS_FAILURE != null) {
    // raise a new exception to generate a new stack trace
    throw fileNotFoundException(WINUTILS_FAILURE.toString(),
        WINUTILS_FAILURE);
  }
  return WINUTILS_FILE;
}
/**
 * Probe for bash support by executing {@code bash -c "echo 1000"}.
 * Always false on Windows; otherwise true unless the probe fails.
 *
 * @return true if bash appears to be usable on this host
 * @throws InterruptedIOException if the probe is interrupted, so callers
 *         can distinguish "interrupted" from "unsupported"
 */
public static boolean checkIsBashSupported() throws InterruptedIOException {
  if (Shell.WINDOWS) {
    return false;
  }
  boolean supported = true;
  try {
    String[] args = {"bash", "-c", "echo 1000"};
    // Scoped to the try block: the executor is not used after execute().
    ShellCommandExecutor shexec = new ShellCommandExecutor(args);
    shexec.execute();
  } catch (InterruptedIOException iioe) {
    // Propagate interruption rather than misreporting "not supported".
    LOG.warn("Interrupted, unable to determine if bash is supported", iioe);
    throw iioe;
  } catch (IOException ioe) {
    LOG.warn("Bash is not supported by the OS", ioe);
    supported = false;
  } catch (SecurityException se) {
    // Attach the exception so the refusal is diagnosable; also fixes the
    // missing space in the original "manager.Considering" message.
    LOG.info("Bash execution is not allowed by the JVM "
        + "security manager. Considering it not supported.", se);
    supported = false;
  }
  return supported;
}
/**
 * Flag which is true if setsid exists; probed once at class load
 * via {@link #isSetsidSupported()}.
 */
public static final boolean isSetsidAvailable = isSetsidSupported();
/**
* Look for <code>setsid</code>.
* @return true if <code>setsid</code> was present
*/
private static boolean isSetsidSupported() {
if (Shell.WINDOWS) {
return false;
}
ShellCommandExecutor shexec = null;
boolean setsidSupported = true;
try {
String[] args = {"setsid", "bash", "-c", "echo $$"};
shexec = new ShellCommandExecutor(args);
shexec.execute();
} catch (IOException ioe) {
LOG.debug("setsid is not available on this machine. So not using it.");
setsidSupported = false;
} catch (SecurityException se) {
LOG.debug("setsid is not allowed to run by the JVM "+
"security manager. So not using it.");
setsidSupported = false;
} catch (Error err) {
if (err.getMessage() != null
&& err.getMessage().contains("posix_spawn is not " +
"a supported process launch mechanism")
&& (Shell.FREEBSD || Shell.MAC)) {
// HADOOP-11924: This is a workaround to avoid failure of | OSType |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/http/converter/FormHttpMessageConverterTests.java | {
"start": 17890,
"end": 18805
} | class ____ implements UploadContext {
private final MockHttpOutputMessage outputMessage;
private final byte[] body;
private MockHttpOutputMessageRequestContext(MockHttpOutputMessage outputMessage) {
this.outputMessage = outputMessage;
this.body = this.outputMessage.getBodyAsBytes();
}
@Override
public String getCharacterEncoding() {
MediaType type = this.outputMessage.getHeaders().getContentType();
return (type != null && type.getCharset() != null ? type.getCharset().name() : null);
}
@Override
public String getContentType() {
MediaType type = this.outputMessage.getHeaders().getContentType();
return (type != null ? type.toString() : null);
}
@Override
public InputStream getInputStream() {
return new ByteArrayInputStream(body);
}
@Override
public long contentLength() {
return body.length;
}
}
public static | MockHttpOutputMessageRequestContext |
java | elastic__elasticsearch | x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheServiceTests.java | {
"start": 2544,
"end": 17424
} | class ____ extends AbstractSearchableSnapshotsTestCase {
private static FSyncTrackingFileSystemProvider fileSystemProvider;
@BeforeClass
public static void installFileSystem() {
fileSystemProvider = new FSyncTrackingFileSystemProvider(PathUtils.getDefaultFileSystem(), createTempDir());
PathUtilsForTesting.installMock(fileSystemProvider.getFileSystem(null));
}
@AfterClass
public static void removeFileSystem() {
fileSystemProvider.tearDown();
fileSystemProvider = null;
}
public void testCacheSynchronization() throws Exception {
final int numShards = randomIntBetween(1, 3);
final Index index = new Index(randomAlphaOfLength(5).toLowerCase(Locale.ROOT), UUIDs.randomBase64UUID(random()));
final String snapshotUUID = UUIDs.randomBase64UUID(random());
final String snapshotIndexName = UUIDs.randomBase64UUID(random());
logger.debug("--> creating shard cache directories on disk");
final Path[] shardsCacheDirs = new Path[numShards];
for (int i = 0; i < numShards; i++) {
final Path shardDataPath = randomShardPath(new ShardId(index, i));
assertFalse(Files.exists(shardDataPath));
logger.debug("--> creating directories [{}] for shard [{}]", shardDataPath.toAbsolutePath(), i);
shardsCacheDirs[i] = Files.createDirectories(CacheService.resolveSnapshotCache(shardDataPath).resolve(snapshotUUID));
}
try (CacheService cacheService = defaultCacheService()) {
logger.debug("--> setting large cache sync interval (explicit cache synchronization calls in test)");
cacheService.setCacheSyncInterval(TimeValue.timeValueMillis(Long.MAX_VALUE));
cacheService.start();
// Keep a count of the number of writes for every cache file existing in the cache
final Map<CacheKey, Tuple<CacheFile, Integer>> previous = new HashMap<>();
for (int iteration = 0; iteration < between(1, 10); iteration++) {
final Map<CacheKey, Tuple<CacheFile, Integer>> updates = new HashMap<>();
logger.trace("--> more random reads/writes from existing cache files");
for (Map.Entry<CacheKey, Tuple<CacheFile, Integer>> cacheEntry : randomSubsetOf(previous.entrySet())) {
final CacheKey cacheKey = cacheEntry.getKey();
final CacheFile cacheFile = cacheEntry.getValue().v1();
final CacheFile.EvictionListener listener = evictedCacheFile -> {};
cacheFile.acquire(listener);
final SortedSet<ByteRange> newCacheRanges = randomPopulateAndReads(cacheFile);
assertThat(cacheService.isCacheFileToSync(cacheFile), is(newCacheRanges.isEmpty() == false));
if (newCacheRanges.isEmpty() == false) {
final int numberOfWrites = cacheEntry.getValue().v2() + 1;
updates.put(cacheKey, Tuple.tuple(cacheFile, numberOfWrites));
}
cacheFile.release(listener);
}
logger.trace("--> creating new cache files and randomly read/write them");
for (int i = 0; i < between(1, 25); i++) {
final ShardId shardId = new ShardId(index, randomIntBetween(0, numShards - 1));
final String fileName = Strings.format("file_%d_%d", iteration, i);
final CacheKey cacheKey = new CacheKey(snapshotUUID, snapshotIndexName, shardId, fileName);
final CacheFile cacheFile = cacheService.get(cacheKey, randomIntBetween(0, 10_000), shardsCacheDirs[shardId.id()]);
final CacheFile.EvictionListener listener = evictedCacheFile -> {};
cacheFile.acquire(listener);
final SortedSet<ByteRange> newRanges = randomPopulateAndReads(cacheFile);
assertThat(cacheService.isCacheFileToSync(cacheFile), is(newRanges.isEmpty() == false));
updates.put(cacheKey, Tuple.tuple(cacheFile, newRanges.isEmpty() ? 0 : 1));
cacheFile.release(listener);
}
logger.trace("--> evicting random cache files");
final Map<CacheFile, Integer> evictions = new HashMap<>();
for (CacheKey evictedCacheKey : randomSubsetOf(Sets.union(previous.keySet(), updates.keySet()))) {
cacheService.removeFromCache(evictedCacheKey);
Tuple<CacheFile, Integer> evicted = previous.remove(evictedCacheKey);
if (evicted != null) {
evictions.put(evicted.v1(), evicted.v2());
updates.remove(evictedCacheKey);
} else {
evicted = updates.remove(evictedCacheKey);
evictions.put(evicted.v1(), 0);
}
}
logger.trace("--> capturing expected number of fsyncs per cache directory before synchronization");
final Map<Path, Integer> cacheDirFSyncs = new HashMap<>();
for (int i = 0; i < shardsCacheDirs.length; i++) {
final Path shardCacheDir = shardsCacheDirs[i];
final ShardId shardId = new ShardId(index, i);
final Integer numberOfFSyncs = fileSystemProvider.getNumberOfFSyncs(shardCacheDir);
if (updates.entrySet()
.stream()
.filter(update -> update.getValue().v2() != null)
.filter(update -> update.getValue().v2() > 0)
.anyMatch(update -> update.getKey().shardId().equals(shardId))) {
cacheDirFSyncs.put(shardCacheDir, numberOfFSyncs == null ? 1 : numberOfFSyncs + 1);
} else {
cacheDirFSyncs.put(shardCacheDir, numberOfFSyncs);
}
}
logger.debug("--> synchronizing cache files [#{}]", iteration);
cacheService.synchronizeCache();
logger.trace("--> verifying cache synchronization correctness");
cacheDirFSyncs.forEach(
(dir, expectedNumberOfFSyncs) -> assertThat(
fileSystemProvider.getNumberOfFSyncs(dir),
Constants.WINDOWS ? nullValue() : equalTo(expectedNumberOfFSyncs)
)
);
evictions.forEach((cacheFile, expectedNumberOfFSyncs) -> {
assertThat(cacheService.isCacheFileToSync(cacheFile), is(false));
assertThat(fileSystemProvider.getNumberOfFSyncs(cacheFile.getFile()), equalTo(expectedNumberOfFSyncs));
});
previous.putAll(updates);
previous.forEach((key, cacheFileAndExpectedNumberOfFSyncs) -> {
CacheFile cacheFile = cacheFileAndExpectedNumberOfFSyncs.v1();
assertThat(cacheService.isCacheFileToSync(cacheFile), is(false));
assertThat(fileSystemProvider.getNumberOfFSyncs(cacheFile.getFile()), equalTo(cacheFileAndExpectedNumberOfFSyncs.v2()));
});
}
}
}
public void testPut() throws Exception {
try (CacheService cacheService = defaultCacheService()) {
final long fileLength = randomLongBetween(0L, 1000L);
final CacheKey cacheKey = new CacheKey(
UUIDs.randomBase64UUID(random()),
randomAlphaOfLength(5).toLowerCase(Locale.ROOT),
new ShardId(randomAlphaOfLength(5).toLowerCase(Locale.ROOT), UUIDs.randomBase64UUID(random()), randomInt(5)),
randomAlphaOfLength(105).toLowerCase(Locale.ROOT)
);
final Path cacheDir = Files.createDirectories(
resolveSnapshotCache(randomShardPath(cacheKey.shardId())).resolve(cacheKey.snapshotUUID())
);
final String cacheFileUuid = UUIDs.randomBase64UUID(random());
final SortedSet<ByteRange> cacheFileRanges = randomBoolean() ? randomRanges(fileLength) : emptySortedSet();
if (randomBoolean()) {
final Path cacheFilePath = cacheDir.resolve(cacheFileUuid);
Files.createFile(cacheFilePath);
cacheService.put(cacheKey, fileLength, cacheDir, cacheFileUuid, cacheFileRanges);
cacheService.start();
final CacheFile cacheFile = cacheService.get(cacheKey, fileLength, cacheDir);
assertThat(cacheFile, notNullValue());
assertThat(cacheFile.getFile(), equalTo(cacheFilePath));
assertThat(cacheFile.getCacheKey(), equalTo(cacheKey));
assertThat(cacheFile.getLength(), equalTo(fileLength));
for (ByteRange cacheFileRange : cacheFileRanges) {
assertThat(cacheFile.getAbsentRangeWithin(cacheFileRange), nullValue());
}
} else {
final FileNotFoundException exception = expectThrows(
FileNotFoundException.class,
() -> cacheService.put(cacheKey, fileLength, cacheDir, cacheFileUuid, cacheFileRanges)
);
cacheService.start();
assertThat(exception.getMessage(), containsString(cacheFileUuid));
}
}
}
public void testMarkShardAsEvictedInCache() throws Exception {
final CacheService cacheService = defaultCacheService();
cacheService.start();
final List<CacheFile> randomCacheFiles = randomCacheFiles(cacheService);
assertThat(cacheService.pendingShardsEvictions(), aMapWithSize(0));
final ShardEviction shard = randomShardEvictionFrom(randomCacheFiles);
final List<CacheFile> cacheFilesAssociatedWithShard = filterByShard(shard, randomCacheFiles);
cacheFilesAssociatedWithShard.forEach(cacheFile -> assertTrue(Files.exists(cacheFile.getFile())));
final BlockingEvictionListener blockingListener = new BlockingEvictionListener();
final CacheFile randomCacheFile = randomFrom(cacheFilesAssociatedWithShard);
assertTrue(Files.exists(randomCacheFile.getFile()));
randomCacheFile.acquire(blockingListener);
final List<CacheFile> randomEvictedCacheFiles = randomSubsetOf(randomCacheFiles);
for (CacheFile randomEvictedCacheFile : randomEvictedCacheFiles) {
if (randomEvictedCacheFile != randomCacheFile) {
cacheService.removeFromCache(randomEvictedCacheFile.getCacheKey());
}
}
for (int i = 0; i < between(1, 3); i++) {
cacheService.markShardAsEvictedInCache(shard.snapshotUUID(), shard.snapshotIndexName(), shard.shardId());
}
blockingListener.waitForBlock();
assertThat(cacheService.pendingShardsEvictions(), aMapWithSize(1));
assertTrue(cacheService.isPendingShardEviction(shard));
blockingListener.unblock();
assertBusy(() -> assertThat(cacheService.pendingShardsEvictions(), aMapWithSize(0)));
for (CacheFile cacheFile : randomCacheFiles) {
final boolean evicted = cacheFilesAssociatedWithShard.contains(cacheFile) || randomEvictedCacheFiles.contains(cacheFile);
assertThat(
"Cache file [" + cacheFile + "] should " + (evicted ? "be deleted" : "exist"),
Files.notExists(cacheFile.getFile()),
equalTo(evicted)
);
}
cacheService.close();
if (randomBoolean()) {
// mark shard as evicted after cache service is stopped should have no effect
cacheService.markShardAsEvictedInCache(shard.snapshotUUID(), shard.snapshotIndexName(), shard.shardId());
assertThat(cacheService.pendingShardsEvictions(), aMapWithSize(0));
}
}
public void testProcessShardEviction() throws Exception {
final CacheService cacheService = defaultCacheService();
cacheService.start();
final List<CacheFile> randomCacheFiles = randomCacheFiles(cacheService);
assertThat(cacheService.pendingShardsEvictions(), aMapWithSize(0));
final ShardEviction shard = randomShardEvictionFrom(randomCacheFiles);
final List<CacheFile> cacheFilesAssociatedWithShard = filterByShard(shard, randomCacheFiles);
cacheFilesAssociatedWithShard.forEach(cacheFile -> assertTrue(Files.exists(cacheFile.getFile())));
final BlockingEvictionListener blockingListener = new BlockingEvictionListener();
final CacheFile randomCacheFile = randomFrom(cacheFilesAssociatedWithShard);
assertTrue(Files.exists(randomCacheFile.getFile()));
randomCacheFile.acquire(blockingListener);
cacheService.markShardAsEvictedInCache(shard.snapshotUUID(), shard.snapshotIndexName(), shard.shardId());
final Map<CacheFile, Boolean> afterShardRecoveryCacheFiles = ConcurrentCollections.newConcurrentMap();
final Future<?> waitForShardEvictionFuture = threadPool.generic().submit(() -> {
cacheService.waitForCacheFilesEvictionIfNeeded(shard.snapshotUUID(), shard.snapshotIndexName(), shard.shardId());
for (CacheFile cacheFile : cacheFilesAssociatedWithShard) {
afterShardRecoveryCacheFiles.put(cacheFile, Files.exists(cacheFile.getFile()));
}
});
blockingListener.waitForBlock();
final Map<ShardEviction, Future<?>> pendingShardsEvictions = cacheService.pendingShardsEvictions();
assertTrue(cacheService.isPendingShardEviction(shard));
assertThat(pendingShardsEvictions, aMapWithSize(1));
final Future<?> pendingShardEvictionFuture = pendingShardsEvictions.get(shard);
assertTrue(Files.exists(randomCacheFile.getFile()));
assertThat(pendingShardEvictionFuture, notNullValue());
assertFalse(pendingShardEvictionFuture.isDone());
blockingListener.unblock();
FutureUtils.get(waitForShardEvictionFuture);
assertTrue(pendingShardEvictionFuture.isDone());
FutureUtils.get(pendingShardEvictionFuture);
cacheFilesAssociatedWithShard.forEach(
cacheFile -> assertFalse("Cache file should be evicted: " + cacheFile, Files.exists(cacheFile.getFile()))
);
afterShardRecoveryCacheFiles.forEach(
(cacheFile, exists) -> assertFalse("Cache file should have been evicted after shard recovery: " + cacheFile, exists)
);
assertThat(cacheService.pendingShardsEvictions(), aMapWithSize(0));
cacheService.stop();
}
private static | CacheServiceTests |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/message/MessageDecoder.java | {
"start": 4114,
"end": 4189
} | class ____ thread-safe.
*
* @param <D> a datum class
*/
abstract | are |
java | elastic__elasticsearch | x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java | {
"start": 3384,
"end": 5120
} | class ____ extends MultiCommand {
private static final String AUTO_GEN_CA_DN = "CN=Elastic Certificate Tool Autogenerated CA";
private static final String DESCRIPTION = "Simplifies certificate creation for use with the Elastic Stack";
private static final String DEFAULT_CSR_ZIP = "csr-bundle.zip";
private static final String DEFAULT_CERT_ZIP = "certificate-bundle.zip";
private static final String DEFAULT_CA_ZIP = "elastic-stack-ca.zip";
private static final String DEFAULT_CA_P12 = "elastic-stack-ca.p12";
private static final BouncyCastleProvider BC_PROV = new BouncyCastleProvider();
static final String DEFAULT_CERT_NAME = "instance";
/**
* Used to test whether passwords are ASCII (which PKCS/PBE requires)
*/
private static final CharsetEncoder ASCII_ENCODER = StandardCharsets.US_ASCII.newEncoder();
private static final int DEFAULT_DAYS = 3 * 365;
private static final int FILE_EXTENSION_LENGTH = 4;
static final int MAX_FILENAME_LENGTH = 255 - FILE_EXTENSION_LENGTH;
private static final Pattern ALLOWED_FILENAME_CHAR_PATTERN = Pattern.compile(
"[a-zA-Z0-9!@#$%^&{}\\[\\]()_+\\-=,.~'` ]{1," + MAX_FILENAME_LENGTH + "}"
);
private static final int DEFAULT_KEY_SIZE = 2048;
static final List<String> DEFAULT_CA_KEY_USAGE = List.of("keyCertSign", "cRLSign");
// Older versions of OpenSSL had a max internal password length.
// We issue warnings when writing files with passwords that would not be usable in those versions of OpenSSL.
static final String OLD_OPENSSL_VERSION = "1.1.0";
static final int MAX_PASSWORD_OLD_OPENSSL = 50;
/**
* Wraps the certgen object parser.
*/
private static | CertificateTool |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java | {
"start": 3466,
"end": 4098
} | class ____ the item
* @param conf the configuration to store
* @param item the object to be stored
* @param keyName the name of the key to use
* @throws IOException : forwards Exceptions from the underlying
* {@link Serialization} classes.
*/
public static <K> void store(Configuration conf, K item, String keyName)
throws IOException {
DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf,
GenericsUtil.getClass(item));
conf.set(keyName, stringifier.toString(item));
stringifier.close();
}
/**
* Restores the object from the configuration.
*
* @param <K> the | of |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java | {
"start": 1935,
"end": 5755
} | class ____ implements Filter {
private Injector injector;
private Context nmContext;
private static final long serialVersionUID = 1L;
@Inject
public NMWebAppFilter(Injector injector, Context nmContext) {
this.injector = injector;
this.nmContext = nmContext;
}
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse,
FilterChain filterChain) throws IOException, ServletException {
HttpServletRequest request = (HttpServletRequest) servletRequest;
HttpServletResponse response = (HttpServletResponse) servletResponse;
String redirectPath = containerLogPageRedirectPath(request);
if (redirectPath != null) {
String redirectMsg = "Redirecting to log server" + " : " + redirectPath;
PrintWriter out = response.getWriter();
out.println(redirectMsg);
response.setHeader("Location", redirectPath);
response.setStatus(HttpServletResponse.SC_TEMPORARY_REDIRECT);
return;
}
filterChain.doFilter(request, response);
}
private String containerLogPageRedirectPath(HttpServletRequest request) {
String uri = HtmlQuoting.quoteHtmlChars(request.getRequestURI());
String redirectPath = null;
if (!uri.contains("/ws/v1/node") && uri.contains("/containerlogs")) {
String[] parts = uri.split("/");
String containerIdStr = parts[3];
String appOwner = parts[4];
String logType = null;
if (parts.length > 5) {
logType = parts[5];
}
if (containerIdStr != null && !containerIdStr.isEmpty()) {
ContainerId containerId;
try {
containerId = ContainerId.fromString(containerIdStr);
} catch (IllegalArgumentException ex) {
return redirectPath;
}
ApplicationId appId =
containerId.getApplicationAttemptId().getApplicationId();
Application app = nmContext.getApplications().get(appId);
boolean fetchAggregatedLog = false;
List<NameValuePair> params = WebAppUtils.getURLEncodedQueryParam(request);
if (params != null) {
for (NameValuePair param : params) {
if (param.getName().equals(ContainerLogsPage
.LOG_AGGREGATION_TYPE)) {
if (param.getValue().equals(ContainerLogsPage
.LOG_AGGREGATION_REMOTE_TYPE)) {
fetchAggregatedLog = true;
}
}
}
}
Configuration nmConf = nmContext.getLocalDirsHandler().getConfig();
if ((app == null || fetchAggregatedLog)
&& nmConf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
String logServerUrl =
nmConf.get(YarnConfiguration.YARN_LOG_SERVER_URL);
if (logServerUrl != null && !logServerUrl.isEmpty()) {
StringBuilder sb = new StringBuilder();
sb.append(logServerUrl);
sb.append("/");
sb.append(nmContext.getNodeId().toString());
sb.append("/");
sb.append(containerIdStr);
sb.append("/");
sb.append(containerIdStr);
sb.append("/");
sb.append(appOwner);
if (logType != null && !logType.isEmpty()) {
sb.append("/");
sb.append(logType);
}
redirectPath =
WebAppUtils.appendQueryParams(request, sb.toString());
} else {
injector.getInstance(RequestContext.class).set(
ContainerLogsPage.REDIRECT_URL, "false");
}
}
}
}
return redirectPath;
}
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
@Override
public void destroy() {
}
}
| NMWebAppFilter |
java | grpc__grpc-java | gae-interop-testing/gae-jdk8/src/main/java/io/grpc/testing/integration/LongLivedChannel.java | {
"start": 1153,
"end": 1549
} | class ____ checks we can reuse a channel across requests.
*
* <p>This servlet communicates with {@code grpc-test.sandbox.googleapis.com}, which is a server
* managed by the gRPC team. For more information, see
* <a href="https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md">
* Interoperability Test Case Descriptions</a>.
*/
@SuppressWarnings("serial")
public final | that |
java | apache__spark | sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ICLIService.java | {
"start": 1113,
"end": 4663
} | interface ____ {
SessionHandle openSession(String username, String password,
Map<String, String> configuration)
throws HiveSQLException;
SessionHandle openSessionWithImpersonation(String username, String password,
Map<String, String> configuration, String delegationToken)
throws HiveSQLException;
void closeSession(SessionHandle sessionHandle)
throws HiveSQLException;
GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType infoType)
throws HiveSQLException;
OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
Map<String, String> confOverlay) throws HiveSQLException;
OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
Map<String, String> confOverlay, long queryTimeout) throws HiveSQLException;
OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
Map<String, String> confOverlay) throws HiveSQLException;
OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
Map<String, String> confOverlay, long queryTimeout) throws HiveSQLException;
OperationHandle getTypeInfo(SessionHandle sessionHandle)
throws HiveSQLException;
OperationHandle getCatalogs(SessionHandle sessionHandle)
throws HiveSQLException;
OperationHandle getSchemas(SessionHandle sessionHandle,
String catalogName, String schemaName)
throws HiveSQLException;
OperationHandle getTables(SessionHandle sessionHandle,
String catalogName, String schemaName, String tableName, List<String> tableTypes)
throws HiveSQLException;
OperationHandle getTableTypes(SessionHandle sessionHandle)
throws HiveSQLException;
OperationHandle getColumns(SessionHandle sessionHandle,
String catalogName, String schemaName, String tableName, String columnName)
throws HiveSQLException;
OperationHandle getFunctions(SessionHandle sessionHandle,
String catalogName, String schemaName, String functionName)
throws HiveSQLException;
OperationStatus getOperationStatus(OperationHandle opHandle)
throws HiveSQLException;
void cancelOperation(OperationHandle opHandle)
throws HiveSQLException;
void closeOperation(OperationHandle opHandle)
throws HiveSQLException;
TTableSchema getResultSetMetadata(OperationHandle opHandle)
throws HiveSQLException;
TRowSet fetchResults(OperationHandle opHandle)
throws HiveSQLException;
TRowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation,
long maxRows, FetchType fetchType) throws HiveSQLException;
String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory,
String owner, String renewer) throws HiveSQLException;
String getQueryId(TOperationHandle operationHandle) throws HiveSQLException;
void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory,
String tokenStr) throws HiveSQLException;
void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory,
String tokenStr) throws HiveSQLException;
OperationHandle getPrimaryKeys(SessionHandle sessionHandle, String catalog,
String schema, String table) throws HiveSQLException;
OperationHandle getCrossReference(SessionHandle sessionHandle,
String primaryCatalog, String primarySchema, String primaryTable,
String foreignCatalog, String foreignSchema, String foreignTable) throws HiveSQLException;
}
| ICLIService |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcMultiplexer.java | {
"start": 862,
"end": 940
} | interface ____ make a pluggable multiplexer in the
* FairCallQueue.
*/
public | to |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java | {
"start": 22162,
"end": 22553
} | class ____
implements ArgumentMatcher<MetricsTag> {
private final String tagName;
public TagExistsMatcher(String tagName) {
this.tagName = tagName;
}
@Override
public boolean matches(MetricsTag asTag) {
return asTag.name().equals(tagName);
}
@Override
public String toString() {
return "Has tag " + tagName;
}
}
}
| TagExistsMatcher |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/javadoc/MissingSummaryTest.java | {
"start": 5667,
"end": 6223
} | class ____ implements java.util.function.Predicate<Object> {
/**
* @param o thing to compare
*/
public boolean test(Object o) {
return false;
}
}
""")
.doTest();
}
@Test
@SuppressWarnings("MisformattedTestData")
public void seeWithHtmlLink() {
helper
.addSourceLines(
"Test.java",
"""
// BUG: Diagnostic contains:
/** @see <a href="foo">bar</a> */
public | Test |
java | apache__camel | components/camel-test/camel-test-main-junit5/src/test/java/org/apache/camel/test/main/junit5/common/MyStatefulBean.java | {
"start": 912,
"end": 1072
} | class ____ {
private final AtomicInteger counter = new AtomicInteger();
public int add() {
return counter.incrementAndGet();
}
}
| MyStatefulBean |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java | {
"start": 12968,
"end": 72134
} | enum ____ {
RE_INIT, COMMIT, ROLLBACK, LOCALIZE;
}
/**
* Extra duration to wait for applications to be killed on shutdown.
*/
private static final int SHUTDOWN_CLEANUP_SLOP_MS = 1000;
private static final Logger LOG =
LoggerFactory.getLogger(ContainerManagerImpl.class);
public static final String INVALID_NMTOKEN_MSG = "Invalid NMToken";
static final String INVALID_CONTAINERTOKEN_MSG =
"Invalid ContainerToken";
protected final Context context;
private final ContainersMonitor containersMonitor;
private Server server;
private final ResourceLocalizationService rsrcLocalizationSrvc;
private final AbstractContainersLauncher containersLauncher;
private final AuxServices auxiliaryServices;
@VisibleForTesting final NodeManagerMetrics metrics;
protected final NodeStatusUpdater nodeStatusUpdater;
protected LocalDirsHandlerService dirsHandler;
private AsyncDispatcher dispatcher;
private final DeletionService deletionService;
private LogHandler logHandler;
private boolean serviceStopped = false;
private final ReadLock readLock;
private final WriteLock writeLock;
private AMRMProxyService amrmProxyService;
protected boolean amrmProxyEnabled = false;
private final ContainerScheduler containerScheduler;
private long waitForContainersOnShutdownMillis;
// NM metrics publisher is set only if the timeline service v.2 is enabled
private NMTimelinePublisher nmMetricsPublisher;
private boolean timelineServiceV2Enabled;
private boolean nmDispatherMetricEnabled;
public ContainerManagerImpl(Context context, ContainerExecutor exec,
DeletionService deletionContext, NodeStatusUpdater nodeStatusUpdater,
NodeManagerMetrics metrics, LocalDirsHandlerService dirsHandler) {
super(ContainerManagerImpl.class.getName());
this.context = context;
this.dirsHandler = dirsHandler;
// ContainerManager level dispatcher.
dispatcher = createContainerManagerDispatcher();
this.deletionService = deletionContext;
this.metrics = metrics;
rsrcLocalizationSrvc =
createResourceLocalizationService(exec, deletionContext, context,
metrics);
addService(rsrcLocalizationSrvc);
containersLauncher = createContainersLauncher(context, exec);
addService(containersLauncher);
this.nodeStatusUpdater = nodeStatusUpdater;
this.containerScheduler = createContainerScheduler(context);
addService(containerScheduler);
AuxiliaryLocalPathHandler auxiliaryLocalPathHandler =
new AuxiliaryLocalPathHandlerImpl(dirsHandler);
// Start configurable services
auxiliaryServices = new AuxServices(auxiliaryLocalPathHandler,
this.context, this.deletionService);
auxiliaryServices.registerServiceListener(this);
context.setAuxServices(auxiliaryServices);
addService(auxiliaryServices);
// initialize the metrics publisher if the timeline service v.2 is enabled
// and the system publisher is enabled
Configuration conf = context.getConf();
if (YarnConfiguration.timelineServiceV2Enabled(conf)) {
if (YarnConfiguration.systemMetricsPublisherEnabled(conf)) {
LOG.info("YARN system metrics publishing service is enabled");
nmMetricsPublisher = createNMTimelinePublisher(context);
context.setNMTimelinePublisher(nmMetricsPublisher);
}
this.timelineServiceV2Enabled = true;
}
this.containersMonitor = createContainersMonitor(exec);
addService(this.containersMonitor);
dispatcher.register(ContainerEventType.class,
new ContainerEventDispatcher());
dispatcher.register(ApplicationEventType.class,
createApplicationEventDispatcher());
dispatcher.register(LocalizationEventType.class,
new LocalizationEventHandlerWrapper(rsrcLocalizationSrvc,
nmMetricsPublisher));
dispatcher.register(AuxServicesEventType.class, auxiliaryServices);
dispatcher.register(ContainersMonitorEventType.class, containersMonitor);
dispatcher.register(ContainersLauncherEventType.class, containersLauncher);
dispatcher.register(ContainerSchedulerEventType.class, containerScheduler);
addService(dispatcher);
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
this.readLock = lock.readLock();
this.writeLock = lock.writeLock();
}
@Override
public void serviceInit(Configuration conf) throws Exception {
logHandler =
createLogHandler(conf, this.context, this.deletionService);
addIfService(logHandler);
dispatcher.register(LogHandlerEventType.class, logHandler);
// add the shared cache upload service (it will do nothing if the shared
// cache is disabled)
SharedCacheUploadService sharedCacheUploader =
createSharedCacheUploaderService();
addService(sharedCacheUploader);
dispatcher.register(SharedCacheUploadEventType.class, sharedCacheUploader);
createAMRMProxyService(conf);
waitForContainersOnShutdownMillis =
conf.getLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS,
YarnConfiguration.DEFAULT_NM_SLEEP_DELAY_BEFORE_SIGKILL_MS) +
conf.getLong(YarnConfiguration.NM_PROCESS_KILL_WAIT_MS,
YarnConfiguration.DEFAULT_NM_PROCESS_KILL_WAIT_MS) +
SHUTDOWN_CLEANUP_SLOP_MS;
nmDispatherMetricEnabled = conf.getBoolean(
YarnConfiguration.NM_DISPATCHER_METRIC_ENABLED,
YarnConfiguration.DEFAULT_NM_DISPATCHER_METRIC_ENABLED);
super.serviceInit(conf);
recover();
}
@SuppressWarnings("unchecked")
protected AsyncDispatcher createContainerManagerDispatcher() {
dispatcher = new AsyncDispatcher("NM ContainerManager dispatcher");
if (!nmDispatherMetricEnabled) {
return dispatcher;
}
GenericEventTypeMetrics<ContainerEventType> containerEventTypeMetrics =
GenericEventTypeMetricsManager.create(dispatcher.getName(), ContainerEventType.class);
dispatcher.addMetrics(containerEventTypeMetrics, containerEventTypeMetrics.getEnumClass());
GenericEventTypeMetrics<LocalizationEventType> localizationEventTypeMetrics =
GenericEventTypeMetricsManager.create(dispatcher.getName(), LocalizationEventType.class);
dispatcher.addMetrics(localizationEventTypeMetrics,
localizationEventTypeMetrics.getEnumClass());
GenericEventTypeMetrics<ApplicationEventType> applicationEventTypeMetrics =
GenericEventTypeMetricsManager.create(dispatcher.getName(), ApplicationEventType.class);
dispatcher.addMetrics(applicationEventTypeMetrics,
applicationEventTypeMetrics.getEnumClass());
GenericEventTypeMetrics<ContainersLauncherEventType> containersLauncherEventTypeMetrics =
GenericEventTypeMetricsManager.create(dispatcher.getName(),
ContainersLauncherEventType.class);
dispatcher.addMetrics(containersLauncherEventTypeMetrics,
containersLauncherEventTypeMetrics.getEnumClass());
GenericEventTypeMetrics<ContainerSchedulerEventType> containerSchedulerEventTypeMetrics =
GenericEventTypeMetricsManager.create(dispatcher.getName(),
ContainerSchedulerEventType.class);
dispatcher.addMetrics(containerSchedulerEventTypeMetrics,
containerSchedulerEventTypeMetrics.getEnumClass());
GenericEventTypeMetrics<ContainersMonitorEventType> containersMonitorEventTypeMetrics =
GenericEventTypeMetricsManager.create(dispatcher.getName(),
ContainersMonitorEventType.class);
dispatcher.addMetrics(containersMonitorEventTypeMetrics,
containersMonitorEventTypeMetrics.getEnumClass());
GenericEventTypeMetrics<AuxServicesEventType> auxServicesEventTypeTypeMetrics =
GenericEventTypeMetricsManager.create(dispatcher.getName(), AuxServicesEventType.class);
dispatcher.addMetrics(auxServicesEventTypeTypeMetrics,
auxServicesEventTypeTypeMetrics.getEnumClass());
GenericEventTypeMetrics<LocalizerEventType> localizerEventTypeMetrics =
GenericEventTypeMetricsManager.create(dispatcher.getName(), LocalizerEventType.class);
dispatcher.addMetrics(localizerEventTypeMetrics, localizerEventTypeMetrics.getEnumClass());
LOG.info("NM ContainerManager dispatcher Metric Initialization Completed.");
return dispatcher;
}
protected void createAMRMProxyService(Configuration conf) {
this.amrmProxyEnabled =
conf.getBoolean(YarnConfiguration.AMRM_PROXY_ENABLED,
YarnConfiguration.DEFAULT_AMRM_PROXY_ENABLED) ||
conf.getBoolean(YarnConfiguration.DIST_SCHEDULING_ENABLED,
YarnConfiguration.DEFAULT_DIST_SCHEDULING_ENABLED);
if (amrmProxyEnabled) {
LOG.info("AMRMProxyService is enabled. "
+ "All the AM->RM requests will be intercepted by the proxy");
this.setAMRMProxyService(
new AMRMProxyService(this.context, this.dispatcher));
addService(this.getAMRMProxyService());
} else {
LOG.info("AMRMProxyService is disabled");
}
}
@VisibleForTesting
protected ContainerScheduler createContainerScheduler(Context cntxt) {
// Currently, this dispatcher is shared by the ContainerManager,
// all the containers, the container monitor and all the container.
// The ContainerScheduler may use its own dispatcher.
return new ContainerScheduler(cntxt, dispatcher, metrics);
}
protected ContainersMonitor createContainersMonitor(ContainerExecutor exec) {
return new ContainersMonitorImpl(exec, dispatcher, this.context);
}
@SuppressWarnings("unchecked")
private void recover() throws IOException, URISyntaxException {
NMStateStoreService stateStore = context.getNMStateStore();
if (stateStore.canRecover()) {
rsrcLocalizationSrvc.recoverLocalizedResources(
stateStore.loadLocalizationState());
RecoveredApplicationsState appsState = stateStore.loadApplicationsState();
try (RecoveryIterator<ContainerManagerApplicationProto> rasIterator =
appsState.getIterator()) {
while (rasIterator.hasNext()) {
ContainerManagerApplicationProto proto = rasIterator.next();
LOG.debug("Recovering application with state: {}", proto);
recoverApplication(proto);
}
}
try (RecoveryIterator<RecoveredContainerState> rcsIterator =
stateStore.getContainerStateIterator()) {
while (rcsIterator.hasNext()) {
RecoveredContainerState rcs = rcsIterator.next();
LOG.debug("Recovering container with state: {}", rcs);
recoverContainer(rcs);
}
}
// Recovery AMRMProxy state after apps and containers are recovered
if (this.amrmProxyEnabled) {
this.getAMRMProxyService().recover();
}
//Dispatching the RECOVERY_COMPLETED event through the dispatcher
//so that all the paused, scheduled and queued containers will
//be scheduled for execution on availability of resources.
dispatcher.getEventHandler().handle(
new ContainerSchedulerEvent(null,
ContainerSchedulerEventType.RECOVERY_COMPLETED));
} else {
LOG.info("Not a recoverable state store. Nothing to recover.");
}
}
private void recoverApplication(ContainerManagerApplicationProto p)
throws IOException {
ApplicationId appId = new ApplicationIdPBImpl(p.getId());
Credentials creds = new Credentials();
creds.readTokenStorageStream(
new DataInputStream(p.getCredentials().newInput()));
List<ApplicationACLMapProto> aclProtoList = p.getAclsList();
Map<ApplicationAccessType, String> acls =
new HashMap<ApplicationAccessType, String>(aclProtoList.size());
for (ApplicationACLMapProto aclProto : aclProtoList) {
acls.put(ProtoUtils.convertFromProtoFormat(aclProto.getAccessType()),
aclProto.getAcl());
}
LogAggregationContext logAggregationContext = null;
if (p.getLogAggregationContext() != null) {
logAggregationContext =
new LogAggregationContextPBImpl(p.getLogAggregationContext());
}
FlowContext fc = null;
if (p.getFlowContext() != null) {
FlowContextProto fcp = p.getFlowContext();
fc = new FlowContext(fcp.getFlowName(), fcp.getFlowVersion(),
fcp.getFlowRunId());
LOG.debug(
"Recovering Flow context: {} for an application {}", fc, appId);
} else {
// in upgrade situations, where there is no prior existing flow context,
// default would be used.
fc = new FlowContext(TimelineUtils.generateDefaultFlowName(null, appId),
YarnConfiguration.DEFAULT_FLOW_VERSION, appId.getClusterTimestamp());
LOG.debug(
"No prior existing flow context found. Using default Flow context: "
+ "{} for an application {}", fc, appId);
}
LOG.info("Recovering application " + appId);
ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), fc,
appId, creds, context, p.getAppLogAggregationInitedTime());
context.getApplications().put(appId, app);
metrics.runningApplication();
app.handle(new ApplicationInitEvent(appId, acls, logAggregationContext));
}
private void recoverContainer(RecoveredContainerState rcs)
throws IOException {
StartContainerRequest req = rcs.getStartRequest();
ContainerLaunchContext launchContext = req.getContainerLaunchContext();
ContainerTokenIdentifier token;
if(rcs.getCapability() != null) {
ContainerTokenIdentifier originalToken =
BuilderUtils.newContainerTokenIdentifier(req.getContainerToken());
token = new ContainerTokenIdentifier(originalToken.getContainerID(),
originalToken.getVersion(), originalToken.getNmHostAddress(),
originalToken.getApplicationSubmitter(), rcs.getCapability(),
originalToken.getExpiryTimeStamp(), originalToken.getMasterKeyId(),
originalToken.getRMIdentifier(), originalToken.getPriority(),
originalToken.getCreationTime(),
originalToken.getLogAggregationContext(),
originalToken.getNodeLabelExpression(),
originalToken.getContainerType(), originalToken.getExecutionType(),
originalToken.getAllocationRequestId(),
originalToken.getAllcationTags());
} else {
token = BuilderUtils.newContainerTokenIdentifier(req.getContainerToken());
}
ContainerId containerId = token.getContainerID();
ApplicationId appId =
containerId.getApplicationAttemptId().getApplicationId();
LOG.info("Recovering " + containerId + " in state " + rcs.getStatus()
+ " with exit code " + rcs.getExitCode());
Application app = context.getApplications().get(appId);
if (app != null) {
recoverActiveContainer(app, launchContext, token, rcs);
if (rcs.getRecoveryType() == RecoveredContainerType.KILL) {
dispatcher.getEventHandler().handle(
new ContainerKillEvent(containerId, ContainerExitStatus.ABORTED,
"Due to invalid StateStore info container was killed"
+ " during recovery"));
}
} else {
if (rcs.getStatus() != RecoveredContainerStatus.COMPLETED) {
LOG.warn(containerId + " has no corresponding application!");
}
LOG.info("Adding " + containerId + " to recently stopped containers");
nodeStatusUpdater.addCompletedContainer(containerId);
}
}
/**
* Recover a running container.
*/
@SuppressWarnings("unchecked")
protected void recoverActiveContainer(Application app,
ContainerLaunchContext launchContext, ContainerTokenIdentifier token,
RecoveredContainerState rcs) throws IOException {
Credentials credentials = YarnServerSecurityUtils.parseCredentials(
launchContext);
Container container = new ContainerImpl(getConfig(), dispatcher,
launchContext, credentials, metrics, token, context, rcs);
context.getContainers().put(token.getContainerID(), container);
containerScheduler.recoverActiveContainer(container, rcs);
app.handle(new ApplicationContainerInitEvent(container));
}
private void waitForRecoveredContainers() throws InterruptedException {
final int sleepMsec = 100;
int waitIterations = 100;
List<ContainerId> newContainers = new ArrayList<ContainerId>();
while (--waitIterations >= 0) {
newContainers.clear();
for (Container container : context.getContainers().values()) {
if (container.getContainerState() == org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.NEW) {
newContainers.add(container.getContainerId());
}
}
if (newContainers.isEmpty()) {
break;
}
LOG.info("Waiting for containers: " + newContainers);
Thread.sleep(sleepMsec);
}
if (waitIterations < 0) {
LOG.warn("Timeout waiting for recovered containers");
}
}
protected LogHandler createLogHandler(Configuration conf, Context context,
DeletionService deletionService) {
if (conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
return new LogAggregationService(this.dispatcher, context,
deletionService, dirsHandler);
} else {
return new NonAggregatingLogHandler(this.dispatcher, deletionService,
dirsHandler,
context.getNMStateStore());
}
}
@Override
public ContainersMonitor getContainersMonitor() {
return this.containersMonitor;
}
protected ResourceLocalizationService createResourceLocalizationService(
ContainerExecutor exec, DeletionService deletionContext,
Context nmContext, NodeManagerMetrics nmMetrics) {
return new ResourceLocalizationService(this.dispatcher, exec,
deletionContext, dirsHandler, nmContext, nmMetrics);
}
protected SharedCacheUploadService createSharedCacheUploaderService() {
return new SharedCacheUploadService();
}
@VisibleForTesting
protected NMTimelinePublisher createNMTimelinePublisher(Context ctxt) {
NMTimelinePublisher nmTimelinePublisherLocal =
new NMTimelinePublisher(ctxt);
addIfService(nmTimelinePublisherLocal);
return nmTimelinePublisherLocal;
}
protected AbstractContainersLauncher createContainersLauncher(
Context ctxt, ContainerExecutor exec) {
Class<? extends AbstractContainersLauncher> containersLauncherClass =
ctxt.getConf()
.getClass(YarnConfiguration.NM_CONTAINERS_LAUNCHER_CLASS,
ContainersLauncher.class, AbstractContainersLauncher.class);
AbstractContainersLauncher launcher;
try {
launcher = ReflectionUtils.newInstance(containersLauncherClass,
ctxt.getConf());
launcher.init(ctxt, this.dispatcher, exec, dirsHandler, this);
} catch (Exception e) {
throw new RuntimeException(e);
}
return launcher;
}
protected EventHandler<ApplicationEvent> createApplicationEventDispatcher() {
return new ApplicationEventDispatcher();
}
@Override
protected void serviceStart() throws Exception {
// Enqueue user dirs in deletion context
Configuration conf = getConfig();
final InetSocketAddress initialAddress = conf.getSocketAddr(
YarnConfiguration.NM_BIND_HOST,
YarnConfiguration.NM_ADDRESS,
YarnConfiguration.DEFAULT_NM_ADDRESS,
YarnConfiguration.DEFAULT_NM_PORT);
boolean usingEphemeralPort = (initialAddress.getPort() == 0);
if (context.getNMStateStore().canRecover() && usingEphemeralPort) {
throw new IllegalArgumentException("Cannot support recovery with an "
+ "ephemeral server port. Check the setting of "
+ YarnConfiguration.NM_ADDRESS);
}
// If recovering then delay opening the RPC service until the recovery
// of resources and containers have completed, otherwise requests from
// clients during recovery can interfere with the recovery process.
final boolean delayedRpcServerStart =
context.getNMStateStore().canRecover();
Configuration serverConf = new Configuration(conf);
// always enforce it to be token-based.
serverConf.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
SaslRpcServer.AuthMethod.TOKEN.toString());
YarnRPC rpc = YarnRPC.create(conf);
server =
rpc.getServer(ContainerManagementProtocol.class, this, initialAddress,
serverConf, this.context.getNMTokenSecretManager(),
conf.getInt(YarnConfiguration.NM_CONTAINER_MGR_THREAD_COUNT,
YarnConfiguration.DEFAULT_NM_CONTAINER_MGR_THREAD_COUNT));
// Enable service authorization?
if (conf.getBoolean(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
false)) {
refreshServiceAcls(conf, NMPolicyProvider.getInstance());
}
String bindHost = conf.get(YarnConfiguration.NM_BIND_HOST);
String nmAddress = conf.getTrimmed(YarnConfiguration.NM_ADDRESS);
String hostOverride = null;
if (bindHost != null && !bindHost.isEmpty()
&& nmAddress != null && !nmAddress.isEmpty()) {
//a bind-host case with an address, to support overriding the first
//hostname found when querying for our hostname with the specified
//address, combine the specified address with the actual port listened
//on by the server
hostOverride = nmAddress.split(":")[0];
}
// setup node ID
InetSocketAddress connectAddress;
if (delayedRpcServerStart) {
connectAddress = NetUtils.getConnectAddress(initialAddress);
} else {
server.start();
connectAddress = NetUtils.getConnectAddress(server);
}
NodeId nodeId = buildNodeId(connectAddress, hostOverride);
((NodeManager.NMContext)context).setNodeId(nodeId);
this.context.getNMTokenSecretManager().setNodeId(nodeId);
this.context.getContainerTokenSecretManager().setNodeId(nodeId);
// start remaining services
super.serviceStart();
if (delayedRpcServerStart) {
waitForRecoveredContainers();
server.start();
// check that the node ID is as previously advertised
connectAddress = NetUtils.getConnectAddress(server);
NodeId serverNode = buildNodeId(connectAddress, hostOverride);
if (!serverNode.equals(nodeId)) {
throw new IOException("Node mismatch after server started, expected '"
+ nodeId + "' but found '" + serverNode + "'");
}
}
LOG.info("ContainerManager started at " + connectAddress);
LOG.info("ContainerManager bound to " + initialAddress);
}
private NodeId buildNodeId(InetSocketAddress connectAddress,
String hostOverride) {
if (hostOverride != null) {
connectAddress = NetUtils.getConnectAddress(
new InetSocketAddress(hostOverride, connectAddress.getPort()));
}
return NodeId.newInstance(
connectAddress.getAddress().getCanonicalHostName(),
connectAddress.getPort());
}
void refreshServiceAcls(Configuration configuration,
PolicyProvider policyProvider) {
this.server.refreshServiceAcl(configuration, policyProvider);
}
@Override
public void serviceStop() throws Exception {
this.writeLock.lock();
try {
serviceStopped = true;
if (context != null) {
cleanUpApplicationsOnNMShutDown();
}
} finally {
this.writeLock.unlock();
}
if (auxiliaryServices.getServiceState() == STARTED) {
auxiliaryServices.unregisterServiceListener(this);
}
if (server != null) {
server.stop();
}
super.serviceStop();
}
public void cleanUpApplicationsOnNMShutDown() {
Map<ApplicationId, Application> applications =
this.context.getApplications();
if (applications.isEmpty()) {
return;
}
LOG.info("Applications still running : " + applications.keySet());
if (this.context.getNMStateStore().canRecover()
&& !this.context.getDecommissioned()) {
if (getConfig().getBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED,
YarnConfiguration.DEFAULT_NM_RECOVERY_SUPERVISED)) {
// do not cleanup apps as they can be recovered on restart
return;
}
}
List<ApplicationId> appIds =
new ArrayList<ApplicationId>(applications.keySet());
this.handle(new CMgrCompletedAppsEvent(appIds,
CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN));
LOG.info("Waiting for Applications to be Finished");
long waitStartTime = System.currentTimeMillis();
while (!applications.isEmpty()
&& System.currentTimeMillis() - waitStartTime < waitForContainersOnShutdownMillis) {
try {
Thread.sleep(1000);
} catch (InterruptedException ex) {
LOG.warn(
"Interrupted while sleeping on applications finish on shutdown", ex);
}
}
// All applications Finished
if (applications.isEmpty()) {
LOG.info("All applications in FINISHED state");
} else {
LOG.info("Done waiting for Applications to be Finished. Still alive: "
+ applications.keySet());
}
}
public void cleanupContainersOnNMResync() {
Map<ContainerId, Container> containers = context.getContainers();
if (containers.isEmpty()) {
return;
}
LOG.info("Containers still running on "
+ CMgrCompletedContainersEvent.Reason.ON_NODEMANAGER_RESYNC + " : "
+ containers.keySet());
List<ContainerId> containerIds =
new ArrayList<ContainerId>(containers.keySet());
LOG.info("Waiting for containers to be killed");
this.handle(new CMgrCompletedContainersEvent(containerIds,
CMgrCompletedContainersEvent.Reason.ON_NODEMANAGER_RESYNC));
/*
* We will wait till all the containers change their state to COMPLETE. We
* will not remove the container statuses from nm context because these
* are used while re-registering node manager with resource manager.
*/
boolean allContainersCompleted = false;
while (!containers.isEmpty() && !allContainersCompleted) {
allContainersCompleted = true;
for (Entry<ContainerId, Container> container : containers.entrySet()) {
if (((ContainerImpl) container.getValue()).getCurrentState()
!= ContainerState.COMPLETE) {
allContainersCompleted = false;
try {
Thread.sleep(1000);
} catch (InterruptedException ex) {
LOG.warn("Interrupted while sleeping on container kill on resync",
ex);
}
break;
}
}
}
// All containers killed
if (allContainersCompleted) {
LOG.info("All containers in DONE state");
} else {
LOG.info("Done waiting for containers to be killed. Still alive: " +
containers.keySet());
}
}
// Get the remoteUGI corresponding to the api call.
protected UserGroupInformation getRemoteUgi()
throws YarnException {
UserGroupInformation remoteUgi;
try {
remoteUgi = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
String msg = "Cannot obtain the user-name. Got exception: "
+ StringUtils.stringifyException(e);
LOG.warn(msg);
throw RPCUtil.getRemoteException(msg);
}
return remoteUgi;
}
// Obtain the needed ContainerTokenIdentifier from the remote-UGI. RPC layer
// currently sets only the required id, but iterate through anyways just to
// be sure.
@Private
@VisibleForTesting
protected NMTokenIdentifier selectNMTokenIdentifier(
UserGroupInformation remoteUgi) {
Set<TokenIdentifier> tokenIdentifiers = remoteUgi.getTokenIdentifiers();
NMTokenIdentifier resultId = null;
for (TokenIdentifier id : tokenIdentifiers) {
if (id instanceof NMTokenIdentifier) {
resultId = (NMTokenIdentifier) id;
break;
}
}
return resultId;
}
protected void authorizeUser(UserGroupInformation remoteUgi,
NMTokenIdentifier nmTokenIdentifier) throws YarnException {
if (nmTokenIdentifier == null) {
throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
}
if (!remoteUgi.getUserName().equals(
nmTokenIdentifier.getApplicationAttemptId().toString())) {
throw RPCUtil.getRemoteException("Expected applicationAttemptId: "
+ remoteUgi.getUserName() + "Found: "
+ nmTokenIdentifier.getApplicationAttemptId());
}
}
/**
 * Validates that a container-start (or, legacy, resource-increase) request
 * is properly authorized before it is acted upon.
 *
 * @param nmTokenIdentifier identifier from the NMToken used on the RPC;
 *          must belong to the same application as the container token
 * @param containerTokenIdentifier
 *          of the container whose resource is to be started or increased
 * @param startRequest true for a container start, false for a resource
 *          increase (affects error messages and the duplicate-start check)
 * @throws YarnException if either token is missing, the tokens belong to
 *           different applications, the container token is expired or is a
 *           duplicate start, or the token was issued by a previous RM
 */
@Private
@VisibleForTesting
protected void authorizeStartAndResourceIncreaseRequest(
    NMTokenIdentifier nmTokenIdentifier,
    ContainerTokenIdentifier containerTokenIdentifier,
    boolean startRequest)
    throws YarnException {
  if (nmTokenIdentifier == null) {
    throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
  }
  if (containerTokenIdentifier == null) {
    throw RPCUtil.getRemoteException(INVALID_CONTAINERTOKEN_MSG);
  }
  /*
   * Check the following:
   * 1. The request comes from the same application attempt
   * 2. The request possess a container token that has not expired
   * 3. The request possess a container token that is granted by a known RM
   */
  ContainerId containerId = containerTokenIdentifier.getContainerID();
  String containerIDStr = containerId.toString();
  boolean unauthorized = false;
  StringBuilder messageBuilder =
      new StringBuilder("Unauthorized request to " + (startRequest ?
          "start container." : "increase container resource."));
  // The NMToken and the container token must be for the same application.
  if (!nmTokenIdentifier.getApplicationAttemptId().getApplicationId().
      equals(containerId.getApplicationAttemptId().getApplicationId())) {
    unauthorized = true;
    messageBuilder.append("\nNMToken for application attempt : ")
      .append(nmTokenIdentifier.getApplicationAttemptId())
      .append(" was used for "
          + (startRequest ? "starting " : "increasing resource of ")
          + "container with container token")
      .append(" issued for application attempt : ")
      .append(containerId.getApplicationAttemptId());
  } else if (startRequest && !this.context.getContainerTokenSecretManager()
      .isValidStartContainerRequest(containerTokenIdentifier)) {
    // Is the container being relaunched? Or RPC layer let startCall with
    // tokens generated off old-secret through?
    unauthorized = true;
    messageBuilder.append("\n Attempt to relaunch the same ")
      .append("container with id ").append(containerIDStr).append(".");
  } else if (containerTokenIdentifier.getExpiryTimeStamp() < System
      .currentTimeMillis()) {
    // Ensure the token is not expired.
    unauthorized = true;
    messageBuilder.append("\nThis token is expired. current time is ")
        .append(System.currentTimeMillis()).append(" found ")
        .append(containerTokenIdentifier.getExpiryTimeStamp());
    messageBuilder.append("\nNote: System times on machines may be out of sync.")
        .append(" Check system time and time zones.");
  }
  if (unauthorized) {
    String msg = messageBuilder.toString();
    LOG.error(msg);
    throw RPCUtil.getRemoteException(msg);
  }
  // Reject tokens minted by an RM other than the one this NM is registered
  // with, e.g. stale allocations surviving an RM restart.
  if (containerTokenIdentifier.getRMIdentifier() != nodeStatusUpdater
      .getRMIdentifier()) {
    // Is the container coming from unknown RM
    StringBuilder sb = new StringBuilder("\nContainer ");
    sb.append(containerTokenIdentifier.getContainerID().toString())
      .append(" rejected as it is allocated by a previous RM");
    throw new InvalidContainerException(sb.toString());
  }
}
/**
 * Start a list of containers on this NodeManager.
 * Containers in the batch succeed or fail independently; per-container
 * failures are reported in the response rather than failing the whole call
 * (except token failures, which abort the batch — see below).
 *
 * @param requests the batch of start-container requests
 * @return per-container success/failure results plus aux-service metadata
 * @throws YarnException if the caller's NMToken is invalid
 * @throws IOException never directly; I/O problems are wrapped and rethrown
 *           as remote YarnExceptions
 */
@Override
public StartContainersResponse startContainers(
    StartContainersRequest requests) throws YarnException, IOException {
  UserGroupInformation remoteUgi = getRemoteUgi();
  String remoteUser = remoteUgi.getUserName();
  NMTokenIdentifier nmTokenIdentifier = selectNMTokenIdentifier(remoteUgi);
  authorizeUser(remoteUgi, nmTokenIdentifier);
  List<ContainerId> succeededContainers = new ArrayList<ContainerId>();
  Map<ContainerId, SerializedException> failedContainers =
      new HashMap<ContainerId, SerializedException>();
  // Synchronize with NodeStatusUpdaterImpl#registerWithRM
  // to avoid race condition during NM-RM resync (due to RM restart) while a
  // container is being started, in particular when the container has not yet
  // been added to the containers map in NMContext.
  synchronized (this.context) {
    for (StartContainerRequest request : requests
        .getStartContainerRequests()) {
      ContainerId containerId = null;
      try {
        if (request.getContainerToken() == null
            || request.getContainerToken().getIdentifier() == null) {
          throw new IOException(INVALID_CONTAINERTOKEN_MSG);
        }
        ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
            .newContainerTokenIdentifier(request.getContainerToken());
        verifyAndGetContainerTokenIdentifier(request.getContainerToken(),
            containerTokenIdentifier);
        containerId = containerTokenIdentifier.getContainerID();
        // Initialize the AMRMProxy service instance only if the container is of
        // type AM and if the AMRMProxy service is enabled
        if (amrmProxyEnabled && containerTokenIdentifier.getContainerType()
            .equals(ContainerType.APPLICATION_MASTER)) {
          this.getAMRMProxyService().processApplicationStartRequest(request);
        }
        performContainerPreStartChecks(nmTokenIdentifier, request,
            containerTokenIdentifier);
        startContainerInternal(containerTokenIdentifier, request,
            remoteUser);
        succeededContainers.add(containerId);
      } catch (YarnException e) {
        // Per-container failure: record it and continue with the batch.
        failedContainers.put(containerId, SerializedException.newInstance(e));
      } catch (InvalidToken ie) {
        // Token failures are both recorded and rethrown, aborting the batch.
        failedContainers
            .put(containerId, SerializedException.newInstance(ie));
        throw ie;
      } catch (IOException e) {
        throw RPCUtil.getRemoteException(e);
      }
    }
    return StartContainersResponse
        .newInstance(getAuxServiceMetaData(), succeededContainers,
            failedContainers);
  }
}
/**
 * Performs all validation required before a container may be started:
 * authorizes the request against the NMToken and container token, records
 * the (possibly renewed) NMToken, and verifies that every aux service named
 * in the launch context's service data exists on this node.
 *
 * @throws YarnException if authorization fails or an aux service is missing
 * @throws InvalidToken if the NMToken cannot be recorded
 */
private void performContainerPreStartChecks(
    NMTokenIdentifier nmTokenIdentifier, StartContainerRequest request,
    ContainerTokenIdentifier containerTokenIdentifier)
    throws YarnException, InvalidToken {
  /*
   * 1) It should save the NMToken into NMTokenSecretManager. This is done
   * here instead of RPC layer because at the time of opening/authenticating
   * the connection it doesn't know what all RPC calls user will make on it.
   * Also new NMToken is issued only at startContainer (once it gets
   * renewed).
   *
   * 2) It should validate containerToken. Need to check below things. a) It
   * is signed by correct master key (part of retrieve password). b) It
   * belongs to correct Node Manager (part of retrieve password). c) It has
   * correct RMIdentifier. d) It is not expired.
   */
  authorizeStartAndResourceIncreaseRequest(
      nmTokenIdentifier, containerTokenIdentifier, true);
  // update NMToken
  updateNMTokenIdentifier(nmTokenIdentifier);
  ContainerLaunchContext launchContext = request.getContainerLaunchContext();
  Map<String, ByteBuffer> serviceData = getAuxServiceMetaData();
  // Every aux service the container supplies data for must actually exist.
  if (launchContext.getServiceData()!=null &&
      !launchContext.getServiceData().isEmpty()) {
    for (Entry<String, ByteBuffer> meta : launchContext.getServiceData()
        .entrySet()) {
      if (null == serviceData.get(meta.getKey())) {
        throw new InvalidAuxServiceException("The auxService:" + meta.getKey()
            + " does not exist");
      }
    }
  }
}
/**
 * Builds the protobuf representation of an application that the NM persists
 * in its state store so the application can be reconstructed on recovery.
 * Null credentials, ACLs, or an incomplete flow context simply leave the
 * corresponding proto fields cleared.
 *
 * @param appId the application to serialize
 * @param user the submitting user
 * @param credentials tokens to embed, may be null
 * @param appAcls application ACL map, may be null
 * @param logAggregationContext log-aggregation settings, may be null
 * @param flowContext ATSv2 flow info; only stored when both flow name and
 *          flow version are present
 * @return the populated proto, ready for the state store
 */
private ContainerManagerApplicationProto buildAppProto(ApplicationId appId,
    String user, Credentials credentials,
    Map<ApplicationAccessType, String> appAcls,
    LogAggregationContext logAggregationContext, FlowContext flowContext) {
  ContainerManagerApplicationProto.Builder protoBuilder =
      ContainerManagerApplicationProto.newBuilder();
  protoBuilder.setId(((ApplicationIdPBImpl) appId).getProto());
  protoBuilder.setUser(user);
  if (logAggregationContext != null) {
    protoBuilder.setLogAggregationContext(
        ((LogAggregationContextPBImpl) logAggregationContext).getProto());
  }
  protoBuilder.clearCredentials();
  if (credentials != null) {
    DataOutputBuffer credentialsBuffer = new DataOutputBuffer();
    try {
      credentials.writeTokenStorageToStream(credentialsBuffer);
      protoBuilder.setCredentials(
          ByteString.copyFrom(credentialsBuffer.getData()));
    } catch (IOException e) {
      // should not occur
      LOG.error("Cannot serialize credentials", e);
    }
  }
  protoBuilder.clearAcls();
  if (appAcls != null) {
    for (Map.Entry<ApplicationAccessType, String> aclEntry
        : appAcls.entrySet()) {
      protoBuilder.addAcls(ApplicationACLMapProto.newBuilder()
          .setAccessType(ProtoUtils.convertToProtoFormat(aclEntry.getKey()))
          .setAcl(aclEntry.getValue())
          .build());
    }
  }
  protoBuilder.clearFlowContext();
  if (flowContext != null && flowContext.getFlowName() != null
      && flowContext.getFlowVersion() != null) {
    protoBuilder.setFlowContext(FlowContextProto.newBuilder()
        .setFlowName(flowContext.getFlowName())
        .setFlowVersion(flowContext.getFlowVersion())
        .setFlowRunId(flowContext.getFlowRunId())
        .build());
  }
  return protoBuilder.build();
}
/**
 * Registers and launches a single container: validates its local resources,
 * creates (or updates) the owning application in the NM context, persists
 * both application and container to the NM state store for recovery, and
 * dispatches the init event that eventually launches the container.
 *
 * @param containerTokenIdentifier validated token for the container
 * @param request the original start request (persisted for recovery)
 * @param remoteUser user name from the RPC, used for audit logging
 * @throws YarnException if a local resource is malformed, the container is
 *           already running on this node, or the NM is shutting down
 * @throws IOException if credential parsing or state-store writes fail
 */
@SuppressWarnings("unchecked")
protected void startContainerInternal(
    ContainerTokenIdentifier containerTokenIdentifier,
    StartContainerRequest request, String remoteUser)
    throws YarnException, IOException {
  ContainerId containerId = containerTokenIdentifier.getContainerID();
  String containerIdStr = containerId.toString();
  String user = containerTokenIdentifier.getApplicationSubmitter();
  Resource containerResource = containerTokenIdentifier.getResource();
  LOG.info("Start request for " + containerIdStr + " by user " + remoteUser +
      " with resource " + containerResource);
  ContainerLaunchContext launchContext = request.getContainerLaunchContext();
  // Sanity check for local resources
  for (Map.Entry<String, LocalResource> rsrc : launchContext
      .getLocalResources().entrySet()) {
    if (rsrc.getValue() == null || rsrc.getValue().getResource() == null) {
      throw new YarnException("Null resource URL for local resource "
          + rsrc.getKey() + " : " + rsrc.getValue());
    } else if (rsrc.getValue().getType() == null) {
      throw new YarnException("Null resource type for local resource "
          + rsrc.getKey() + " : " + rsrc.getValue());
    } else if (rsrc.getValue().getVisibility() == null) {
      throw new YarnException("Null resource visibility for local resource "
          + rsrc.getKey() + " : " + rsrc.getValue());
    }
  }
  Credentials credentials =
      YarnServerSecurityUtils.parseCredentials(launchContext);
  long containerStartTime = SystemClock.getInstance().getTime();
  Container container =
      new ContainerImpl(getConfig(), this.dispatcher,
          launchContext, credentials, metrics, containerTokenIdentifier,
          context, containerStartTime);
  ApplicationId applicationID =
      containerId.getApplicationAttemptId().getApplicationId();
  // putIfAbsent makes registration atomic: a non-null return means another
  // start for the same container id won the race.
  if (context.getContainers().putIfAbsent(containerId, container) != null) {
    NMAuditLogger.logFailure(remoteUser, AuditConstants.START_CONTAINER,
        "ContainerManagerImpl", "Container already running on this node!",
        applicationID, containerId);
    throw RPCUtil.getRemoteException("Container " + containerIdStr
        + " already is running on this node!!");
  }
  // NOTE(review): readLock appears to be paired with a write lock held
  // while the service stops; isServiceStopped() is checked under it so a
  // start cannot interleave with shutdown — confirm against stop path.
  this.readLock.lock();
  try {
    if (!isServiceStopped()) {
      if (!context.getApplications().containsKey(applicationID)) {
        // Create the application
        // populate the flow context from the launch context if the timeline
        // service v.2 is enabled
        FlowContext flowContext =
            getFlowContext(launchContext, applicationID);
        Application application =
            new ApplicationImpl(dispatcher, user, flowContext,
                applicationID, credentials, context);
        if (context.getApplications().putIfAbsent(applicationID,
            application) == null) {
          metrics.runningApplication();
          LOG.info("Creating a new application reference for app "
              + applicationID);
          LogAggregationContext logAggregationContext =
              containerTokenIdentifier.getLogAggregationContext();
          Map<ApplicationAccessType, String> appAcls =
              container.getLaunchContext().getApplicationACLs();
          context.getNMStateStore().storeApplication(applicationID,
              buildAppProto(applicationID, user, credentials, appAcls,
                  logAggregationContext, flowContext));
          dispatcher.getEventHandler().handle(new ApplicationInitEvent(
              applicationID, appAcls, logAggregationContext));
        }
      } else if (containerTokenIdentifier.getContainerType()
          == ContainerType.APPLICATION_MASTER) {
        // An AM container for an already-known application may carry fresh
        // flow-context info; propagate it and re-persist the application.
        FlowContext flowContext =
            getFlowContext(launchContext, applicationID);
        if (flowContext != null) {
          ApplicationImpl application =
              (ApplicationImpl) context.getApplications().get(applicationID);
          // update flowContext reference in ApplicationImpl
          application.setFlowContext(flowContext);
          // Required to update state store for recovery.
          context.getNMStateStore().storeApplication(applicationID,
              buildAppProto(applicationID, user, credentials,
                  container.getLaunchContext().getApplicationACLs(),
                  containerTokenIdentifier.getLogAggregationContext(),
                  flowContext));
          LOG.info(
              "Updated application reference with flowContext " + flowContext
                  + " for app " + applicationID);
        } else {
          LOG.info("TimelineService V2.0 is not enabled. Skipping updating "
              + "flowContext for application " + applicationID);
        }
      }
      this.context.getNMStateStore().storeContainer(containerId,
          containerTokenIdentifier.getVersion(), containerStartTime, request);
      dispatcher.getEventHandler().handle(
          new ApplicationContainerInitEvent(container));
      this.context.getContainerTokenSecretManager().startContainerSuccessful(
          containerTokenIdentifier);
      NMAuditLogger.logSuccess(remoteUser, AuditConstants.START_CONTAINER,
          "ContainerManageImpl", applicationID, containerId);
      // TODO launchedContainer misplaced -> doesn't necessarily mean a container
      // launch. A finished Application will not launch containers.
      metrics.launchedContainer();
      metrics.allocateContainer(containerTokenIdentifier.getResource());
    } else {
      throw new YarnException(
          "Container start failed as the NodeManager is " +
              "in the process of shutting down");
    }
  } finally {
    this.readLock.unlock();
  }
}
/**
 * Extracts the ATSv2 flow context (flow name, version, run id) from the
 * container launch context's environment.
 *
 * @param launchContext launch context whose environment carries the flow tags
 * @param applicationID application the flow context is created for (logging)
 * @return the flow context, or null when the timeline service v2 is disabled
 */
private FlowContext getFlowContext(ContainerLaunchContext launchContext,
    ApplicationId applicationID) {
  if (!timelineServiceV2Enabled) {
    return null;
  }
  Map<String, String> env = launchContext.getEnvironment();
  String flowName = env.get(TimelineUtils.FLOW_NAME_TAG_PREFIX);
  String flowVersion = env.get(TimelineUtils.FLOW_VERSION_TAG_PREFIX);
  String flowRunIdStr = env.get(TimelineUtils.FLOW_RUN_ID_TAG_PREFIX);
  // A missing/empty run id defaults to 0.
  long flowRunId = 0L;
  if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
    flowRunId = Long.parseLong(flowRunIdStr);
  }
  FlowContext flowContext = new FlowContext(flowName, flowVersion, flowRunId);
  LOG.debug("Flow context: {} created for an application {}",
      flowContext, applicationID);
  return flowContext;
}
/**
 * Verifies a container token by recomputing its password with the
 * container-token secret manager and comparing it against the password
 * carried in the token itself.
 *
 * @param token the container token sent by the client
 * @param containerTokenIdentifier the identifier decoded from that token
 * @return the validated identifier (the same instance passed in)
 * @throws InvalidToken if either password is missing or they do not match
 */
protected ContainerTokenIdentifier verifyAndGetContainerTokenIdentifier(
    org.apache.hadoop.yarn.api.records.Token token,
    ContainerTokenIdentifier containerTokenIdentifier) throws YarnException,
    InvalidToken {
  byte[] password =
      context.getContainerTokenSecretManager().retrievePassword(
          containerTokenIdentifier);
  byte[] tokenPass = token.getPassword().array();
  // MessageDigest.isEqual is a constant-time comparison, avoiding timing
  // side channels on the token password.
  if (password == null || tokenPass == null
      || !MessageDigest.isEqual(password, tokenPass)) {
    throw new InvalidToken(
        "Invalid container token used for starting container on : "
            + context.getNodeId().toString());
  }
  return containerTokenIdentifier;
}
/**
 * Increase resource of a list of containers on this NodeManager.
 * Deprecated: thin adapter that forwards to the generic
 * {@link #updateContainer} path and repackages its response.
 *
 * @param requests the legacy increase-resource request
 * @return per-container results translated from the update response
 * @throws YarnException propagated from the update path
 * @throws IOException propagated from the update path
 */
@Override
@Deprecated
public IncreaseContainersResourceResponse increaseContainersResource(
    IncreaseContainersResourceRequest requests)
    throws YarnException, IOException {
  ContainerUpdateRequest updateRequest =
      ContainerUpdateRequest.newInstance(requests.getContainersToIncrease());
  ContainerUpdateResponse updateResponse = updateContainer(updateRequest);
  return IncreaseContainersResourceResponse.newInstance(
      updateResponse.getSuccessfullyUpdatedContainers(),
      updateResponse.getFailedRequests());
}
/**
 * Update resource of a list of containers on this NodeManager.
 * Each token in the request is processed independently; per-container
 * failures are returned in the response rather than failing the call.
 *
 * @param request container tokens describing the desired updates
 * @return per-container success/failure results
 * @throws YarnException if the caller's NMToken is invalid
 * @throws IOException never directly; I/O problems are wrapped and rethrown
 *           as remote YarnExceptions
 */
@Override
public ContainerUpdateResponse updateContainer(ContainerUpdateRequest
    request) throws YarnException, IOException {
  UserGroupInformation remoteUgi = getRemoteUgi();
  NMTokenIdentifier nmTokenIdentifier = selectNMTokenIdentifier(remoteUgi);
  authorizeUser(remoteUgi, nmTokenIdentifier);
  List<ContainerId> successfullyUpdatedContainers
      = new ArrayList<ContainerId>();
  Map<ContainerId, SerializedException> failedContainers =
      new HashMap<ContainerId, SerializedException>();
  // Synchronize with NodeStatusUpdaterImpl#registerWithRM
  // to avoid race condition during NM-RM resync (due to RM restart) while a
  // container resource is being increased in NM, in particular when the
  // increased container has not yet been added to the increasedContainers
  // map in NMContext.
  synchronized (this.context) {
    // Process container resource increase requests
    for (org.apache.hadoop.yarn.api.records.Token token :
        request.getContainersToUpdate()) {
      ContainerId containerId = null;
      try {
        if (token.getIdentifier() == null) {
          throw new IOException(INVALID_CONTAINERTOKEN_MSG);
        }
        ContainerTokenIdentifier containerTokenIdentifier =
            BuilderUtils.newContainerTokenIdentifier(token);
        verifyAndGetContainerTokenIdentifier(token,
            containerTokenIdentifier);
        // false = this is an update, not a start request.
        authorizeStartAndResourceIncreaseRequest(
            nmTokenIdentifier, containerTokenIdentifier, false);
        containerId = containerTokenIdentifier.getContainerID();
        // Reuse the startContainer logic to update NMToken,
        // as container resource increase request will have come with
        // an updated NMToken.
        updateNMTokenIdentifier(nmTokenIdentifier);
        updateContainerInternal(containerId, containerTokenIdentifier);
        successfullyUpdatedContainers.add(containerId);
      } catch (YarnException | InvalidToken e) {
        // Per-container failure: record it and continue with the batch.
        failedContainers.put(containerId, SerializedException.newInstance(e));
      } catch (IOException e) {
        throw RPCUtil.getRemoteException(e);
      }
    }
  }
  return ContainerUpdateResponse.newInstance(
      successfullyUpdatedContainers, failedContainers);
}
/**
 * Applies a single container update (resource change and/or execution-type
 * change) after validating that the container exists, that the new token's
 * version is strictly newer, and records a pending "increase" before
 * dispatching the token-update event to the container.
 *
 * @param containerId the container to update
 * @param containerTokenIdentifier the new, already-verified container token
 * @throws YarnException if the container is unknown, the token version is
 *           stale, an increase is already in flight, or the NM is stopping
 * @throws IOException declared for state interactions on the update path
 */
@SuppressWarnings("unchecked")
private void updateContainerInternal(ContainerId containerId,
    ContainerTokenIdentifier containerTokenIdentifier)
    throws YarnException, IOException {
  Container container = context.getContainers().get(containerId);
  // Check container existence
  if (container == null) {
    if (nodeStatusUpdater.isContainerRecentlyStopped(containerId)) {
      throw RPCUtil.getRemoteException("Container " + containerId.toString()
          + " was recently stopped on node manager.");
    } else {
      throw RPCUtil.getRemoteException("Container " + containerId.toString()
          + " is not handled by this NodeManager");
    }
  }
  // Check container version.
  // Updates must carry a strictly newer token version; equal or older
  // versions are rejected as stale.
  int currentVersion = container.getContainerTokenIdentifier().getVersion();
  if (containerTokenIdentifier.getVersion() <= currentVersion) {
    throw RPCUtil.getRemoteException("Container " + containerId.toString()
        + " has update version [" + currentVersion + "] >= requested version"
        + " [" + containerTokenIdentifier.getVersion() + "]");
  }
  // Check validity of the target resource.
  Resource currentResource = container.getResource();
  ExecutionType currentExecType =
      container.getContainerTokenIdentifier().getExecutionType();
  boolean isResourceChange = false;
  boolean isExecTypeUpdate = false;
  Resource targetResource = containerTokenIdentifier.getResource();
  ExecutionType targetExecType = containerTokenIdentifier.getExecutionType();
  // Is true if either the resources has increased or execution type
  // updated from opportunistic to guaranteed
  boolean isIncrease = false;
  if (!currentResource.equals(targetResource)) {
    isResourceChange = true;
    // Strict increase: current fits inside target but not the reverse.
    isIncrease = Resources.fitsIn(currentResource, targetResource)
        && !Resources.fitsIn(targetResource, currentResource);
  } else if (!currentExecType.equals(targetExecType)) {
    isExecTypeUpdate = true;
    isIncrease = currentExecType == ExecutionType.OPPORTUNISTIC &&
        targetExecType == ExecutionType.GUARANTEED;
  }
  if (isIncrease) {
    org.apache.hadoop.yarn.api.records.Container increasedContainer = null;
    if (isResourceChange) {
      increasedContainer =
          org.apache.hadoop.yarn.api.records.Container.newInstance(
              containerId, null, null, targetResource, null,
              null, currentExecType);
      // putIfAbsent rejects a second concurrent increase for the same
      // container.
      if (context.getIncreasedContainers().putIfAbsent(containerId,
          increasedContainer) != null){
        throw RPCUtil.getRemoteException("Container " + containerId.toString()
            + " resource is being increased -or- " +
            "is undergoing ExecutionType promoted.");
      }
    }
  }
  // NOTE(review): readLock appears paired with a write lock taken while
  // the service stops; serviceStopped is checked under it — confirm
  // against the stop path.
  this.readLock.lock();
  try {
    if (!serviceStopped) {
      // Dispatch message to Container to actually
      // make the change.
      dispatcher.getEventHandler().handle(new UpdateContainerTokenEvent(
          container.getContainerId(), containerTokenIdentifier,
          isResourceChange, isExecTypeUpdate, isIncrease));
    } else {
      throw new YarnException(
          "Unable to change container resource as the NodeManager is "
              + "in the process of shutting down");
    }
  } finally {
    this.readLock.unlock();
  }
}
/**
 * Records the given NMToken with the NM token secret manager so subsequent
 * calls from the same application attempt authenticate with it.
 *
 * @param nmTokenIdentifier the NMToken identifier to record
 * @throws InvalidToken if the token is rejected by the secret manager
 */
@Private
@VisibleForTesting
protected void updateNMTokenIdentifier(NMTokenIdentifier nmTokenIdentifier)
    throws InvalidToken {
  context.getNMTokenSecretManager().appAttemptStartContainer(
      nmTokenIdentifier);
}
/**
 * Stop a list of containers running on this NodeManager.
 * Containers are stopped independently; per-container failures are
 * returned in the response instead of failing the whole call.
 *
 * @param requests ids of the containers to stop
 * @return per-container success/failure results
 * @throws YarnException if the caller's NMToken is invalid
 * @throws IOException declared by the protocol; not thrown directly here
 */
@Override
public StopContainersResponse stopContainers(StopContainersRequest requests)
    throws YarnException, IOException {
  List<ContainerId> succeededRequests = new ArrayList<ContainerId>();
  Map<ContainerId, SerializedException> failedRequests =
      new HashMap<ContainerId, SerializedException>();
  UserGroupInformation remoteUgi = getRemoteUgi();
  NMTokenIdentifier identifier = selectNMTokenIdentifier(remoteUgi);
  if (identifier == null) {
    throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
  }
  String remoteUser = remoteUgi.getUserName();
  for (ContainerId id : requests.getContainerIds()) {
    try {
      Container container = this.context.getContainers().get(id);
      // Authorization happens per container; a failure only fails that id.
      authorizeGetAndStopContainerRequest(id, container, true, identifier,
          remoteUser);
      stopContainerInternal(id, remoteUser);
      succeededRequests.add(id);
    } catch (YarnException e) {
      failedRequests.put(id, SerializedException.newInstance(e));
    }
  }
  return StopContainersResponse
      .newInstance(succeededRequests, failedRequests);
}
/**
 * Stops a single container: persists the kill in the NM state store and
 * sends the container a kill event. Stopping a container that was recently
 * stopped is a no-op; a completely unknown container is an error, and a
 * recovering container asks the caller to retry later.
 *
 * @param containerID the container to stop
 * @param remoteUser user name from the RPC, used for audit logging
 * @throws YarnException if the container is unknown or still recovering
 * @throws IOException if the state-store write fails
 */
@SuppressWarnings("unchecked")
protected void stopContainerInternal(ContainerId containerID,
    String remoteUser)
    throws YarnException, IOException {
  String containerIDStr = containerID.toString();
  Container container = this.context.getContainers().get(containerID);
  LOG.info("Stopping container with container Id: " + containerIDStr);
  if (container == null) {
    // Recently-stopped containers are tolerated silently (idempotent stop).
    if (!nodeStatusUpdater.isContainerRecentlyStopped(containerID)) {
      throw RPCUtil.getRemoteException("Container " + containerIDStr
          + " is not handled by this NodeManager");
    }
  } else {
    if (container.isRecovering()) {
      throw new NMNotYetReadyException("Container " + containerIDStr
          + " is recovering, try later");
    }
    // Persist first so the kill is replayed if the NM restarts mid-stop.
    context.getNMStateStore().storeContainerKilled(containerID);
    container.sendKillEvent(ContainerExitStatus.KILLED_BY_APPMASTER,
        "Container killed by the ApplicationMaster.");
    NMAuditLogger.logSuccess(remoteUser, AuditConstants.STOP_CONTAINER,
        "ContainerManageImpl",
        containerID.getApplicationAttemptId().getApplicationId(),
        containerID);
  }
}
/**
 * Get a list of container statuses running on this NodeManager.
 * Statuses are fetched independently; per-container failures are returned
 * in the response rather than failing the whole call.
 *
 * @param request ids of the containers to query
 * @return statuses for the containers that succeeded, plus failures
 * @throws YarnException if the caller's NMToken is invalid
 * @throws IOException declared by the protocol; not thrown directly here
 */
@Override
public GetContainerStatusesResponse getContainerStatuses(
    GetContainerStatusesRequest request) throws YarnException, IOException {
  List<ContainerStatus> succeededRequests = new ArrayList<ContainerStatus>();
  Map<ContainerId, SerializedException> failedRequests =
      new HashMap<ContainerId, SerializedException>();
  UserGroupInformation remoteUgi = getRemoteUgi();
  NMTokenIdentifier identifier = selectNMTokenIdentifier(remoteUgi);
  if (identifier == null) {
    throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
  }
  String remoteUser = remoteUgi.getUserName();
  for (ContainerId id : request.getContainerIds()) {
    try {
      ContainerStatus status = getContainerStatusInternal(id, identifier,
          remoteUser);
      succeededRequests.add(status);
    } catch (YarnException e) {
      failedRequests.put(id, SerializedException.newInstance(e));
    }
  }
  return GetContainerStatusesResponse.newInstance(succeededRequests,
      failedRequests);
}
/**
 * Returns the current status of a single container after authorizing the
 * caller. Authorization runs before the existence check so that callers
 * from other applications cannot probe which containers exist on this node.
 *
 * @param containerID the container to query
 * @param nmTokenIdentifier the caller's NMToken identifier
 * @param remoteUser user name from the RPC, used for audit logging
 * @throws YarnException if the caller is not authorized or the container is
 *           unknown / recently stopped
 */
protected ContainerStatus getContainerStatusInternal(ContainerId containerID,
    NMTokenIdentifier nmTokenIdentifier, String remoteUser)
    throws YarnException {
  String containerIDStr = containerID.toString();
  Container container = this.context.getContainers().get(containerID);
  LOG.info("Getting container-status for " + containerIDStr);
  authorizeGetAndStopContainerRequest(containerID, container, false,
      nmTokenIdentifier, remoteUser);
  if (container == null) {
    if (nodeStatusUpdater.isContainerRecentlyStopped(containerID)) {
      throw RPCUtil.getRemoteException("Container " + containerIDStr
          + " was recently stopped on node manager.");
    } else {
      throw RPCUtil.getRemoteException("Container " + containerIDStr
          + " is not handled by this NodeManager");
    }
  }
  ContainerStatus containerStatus = container.cloneAndGetContainerStatus();
  logContainerStatus("Returning ", containerStatus);
  return containerStatus;
}
/**
 * Logs a single-line, bracketed rendering of a container status at INFO.
 * The (potentially long) diagnostics string is elided to "..." unless
 * debug logging is enabled.
 *
 * @param prefix text prepended to the log line (e.g. "Returning ")
 * @param status the status to render
 */
private void logContainerStatus(String prefix, ContainerStatus status) {
  String diagnostics =
      LOG.isDebugEnabled() ? status.getDiagnostics() : "...";
  StringBuilder message = new StringBuilder(prefix)
      .append("ContainerStatus: [")
      .append("ContainerId: ").append(status.getContainerId()).append(", ")
      .append("ExecutionType: ").append(status.getExecutionType()).append(", ")
      .append("State: ").append(status.getState()).append(", ")
      .append("Capability: ").append(status.getCapability()).append(", ")
      .append("Diagnostics: ").append(diagnostics).append(", ")
      .append("ExitStatus: ").append(status.getExitStatus()).append(", ")
      .append("IP: ").append(status.getIPs()).append(", ")
      .append("Host: ").append(status.getHost()).append(", ")
      .append("ExposedPorts: ").append(status.getExposedPorts()).append(", ")
      .append("ContainerSubState: ").append(status.getContainerSubState())
      .append("]");
  LOG.info(message.toString());
}
/**
 * Authorizes a get-status or stop request for a container: the caller's
 * NMToken must belong to the same application as the requested container.
 * Stop attempts against foreign containers are additionally audit-logged.
 *
 * @param containerId the container being queried or stopped
 * @param container the container instance, or null if unknown on this node
 * @param stopRequest true for a stop request, false for get-status
 * @param identifier the caller's NMToken identifier
 * @param remoteUser user name from the RPC, used for audit logging
 * @throws YarnException if the token is missing or belongs to a different
 *           application than the container
 */
@Private
@VisibleForTesting
protected void authorizeGetAndStopContainerRequest(ContainerId containerId,
    Container container, boolean stopRequest, NMTokenIdentifier identifier,
    String remoteUser)
    throws YarnException {
  if (identifier == null) {
    throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
  }
  /*
   * For get/stop container status; we need to verify that 1) User (NMToken)
   * application attempt only has started container. 2) Requested containerId
   * belongs to the same application attempt (NMToken) which was used. (Note:-
   * This will prevent user in knowing another application's containers).
   */
  ApplicationId nmTokenAppId =
      identifier.getApplicationAttemptId().getApplicationId();
  // Both the requested id and (when present) the live container must belong
  // to the token's application.
  if ((!nmTokenAppId.equals(containerId.getApplicationAttemptId().getApplicationId()))
      || (container != null && !nmTokenAppId.equals(container
          .getContainerId().getApplicationAttemptId().getApplicationId()))) {
    String msg;
    if (stopRequest) {
      msg = identifier.getApplicationAttemptId()
          + " attempted to stop non-application container : "
          + containerId;
      // Only cross-application stop attempts are audit-logged as failures.
      NMAuditLogger.logFailure(remoteUser, AuditConstants.STOP_CONTAINER,
          "ContainerManagerImpl", "Trying to stop unknown container!",
          nmTokenAppId, containerId);
    } else {
      msg = identifier.getApplicationAttemptId()
          + " attempted to get status for non-application container : "
          + containerId;
    }
    LOG.warn(msg);
    throw RPCUtil.getRemoteException(msg);
  }
}
| ReInitOp |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/endpoint/web/annotation/ServletEndpointDiscovererTests.java | {
"start": 7165,
"end": 7307
} | class ____ {
}
@Configuration(proxyBeanMethods = false)
@Import({ TestEndpoint.class, TestServletEndpoint.class })
static | EmptyConfiguration |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/globals/TemplateGlobalNamespaceValidationFailureTest.java | {
"start": 1770,
"end": 1856
} | class ____ {
@TemplateGlobal
static String user = "Fu";
}
}
| Globals |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/index/ConstPoolScanner.java | {
"start": 138,
"end": 1048
} | class ____ {
static final int CONSTANT_UTF8_TAG = 1;
static final int CONSTANT_INTEGER_TAG = 3;
static final int CONSTANT_FLOAT_TAG = 4;
static final int CONSTANT_LONG_TAG = 5;
static final int CONSTANT_DOUBLE_TAG = 6;
static final int CONSTANT_CLASS_TAG = 7;
static final int CONSTANT_STRING_TAG = 8;
static final int CONSTANT_FIELDREF_TAG = 9;
static final int CONSTANT_METHODREF_TAG = 10;
static final int CONSTANT_INTERFACE_METHODREF_TAG = 11;
static final int CONSTANT_NAME_AND_TYPE_TAG = 12;
static final int CONSTANT_METHOD_HANDLE_TAG = 15;
static final int CONSTANT_METHOD_TYPE_TAG = 16;
static final int CONSTANT_DYNAMIC_TAG = 17;
static final int CONSTANT_INVOKE_DYNAMIC_TAG = 18;
static final int CONSTANT_MODULE_TAG = 19;
static final int CONSTANT_PACKAGE_TAG = 20;
//TODO: at the moment this only looks for the | ConstPoolScanner |
java | elastic__elasticsearch | x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java | {
"start": 2816,
"end": 11670
} | class ____ extends DownsamplingIntegTestCase {
private static final Logger logger = LogManager.getLogger(ILMDownsampleDisruptionIT.class);
private static final String POLICY_NAME = "mypolicy";
public static final int DOC_COUNT = 10_000;
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(
LocalStateCompositeXPackPlugin.class,
Downsample.class,
AggregateMetricMapperPlugin.class,
LocalStateCompositeXPackPlugin.class,
IndexLifecycle.class,
Ccr.class
);
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
Settings.Builder nodeSettings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings));
nodeSettings.put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s");
// This is necessary to prevent ILM installing a lifecycle policy, these tests assume a blank slate
nodeSettings.put(LifecycleSettings.LIFECYCLE_HISTORY_INDEX_ENABLED, false);
return nodeSettings.build();
}
public void setup(final String sourceIndex, int numOfShards, int numOfReplicas, long startTime, DownsampleConfig config)
throws IOException {
final Settings.Builder settings = indexSettings(numOfShards, numOfReplicas).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES)
.putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_KEYWORD))
.put(
IndexSettings.TIME_SERIES_START_TIME.getKey(),
DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(Instant.ofEpochMilli(startTime).toEpochMilli())
)
.put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2022-01-08T23:40:53.384Z");
if (randomBoolean()) {
settings.put(IndexMetadata.SETTING_INDEX_HIDDEN, randomBoolean());
}
final XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties");
mapping.startObject(FIELD_TIMESTAMP).field("type", "date").endObject();
mapping.startObject(FIELD_DIMENSION_KEYWORD).field("type", "keyword").field("time_series_dimension", true).endObject();
mapping.startObject(FIELD_DIMENSION_LONG).field("type", "long").field("time_series_dimension", true).endObject();
mapping.startObject(FIELD_METRIC_COUNTER_DOUBLE)
.field("type", "double") /* numeric label indexed as a metric */
.field("time_series_metric", "counter")
.endObject();
mapping.endObject().endObject().endObject();
assertAcked(indicesAdmin().prepareCreate(sourceIndex).setSettings(settings.build()).setMapping(mapping).get());
Map<String, Phase> phases = new HashMap<>();
phases.put(
"warm",
new Phase(
"warm",
TimeValue.ZERO,
Map.of(
"downsample",
new org.elasticsearch.xpack.core.ilm.DownsampleAction(
config.getFixedInterval(),
null,
randomBoolean(),
config.getSamplingMethod()
)
)
)
);
LifecyclePolicy policy = new LifecyclePolicy(POLICY_NAME, phases);
PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, policy);
assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).actionGet());
}
public void testILMDownsampleRollingRestart() throws Exception {
final InternalTestCluster cluster = internalCluster();
cluster.startMasterOnlyNodes(1);
cluster.startDataOnlyNodes(3);
ensureStableCluster(cluster.size());
ensureGreen();
final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
long startTime = LocalDateTime.parse("1993-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli();
DownsampleConfig.SamplingMethod samplingMethod = randomSamplingMethod();
final DownsampleConfig config = new DownsampleConfig(DateHistogramInterval.HOUR, samplingMethod);
setup(sourceIndex, 1, 0, startTime, config);
final Supplier<XContentBuilder> sourceSupplier = () -> {
final String ts = randomDateForInterval(config.getInterval(), startTime);
double counterValue = DATE_FORMATTER.parseMillis(ts);
final List<String> dimensionValues = new ArrayList<>(5);
for (int j = 0; j < randomIntBetween(1, 5); j++) {
dimensionValues.add(randomAlphaOfLength(6));
}
try {
return XContentFactory.jsonBuilder()
.startObject()
.field(FIELD_TIMESTAMP, ts)
.field(FIELD_DIMENSION_KEYWORD, randomFrom(dimensionValues))
.field(FIELD_DIMENSION_LONG, randomIntBetween(1, 10))
.field(FIELD_METRIC_COUNTER_DOUBLE, counterValue)
.endObject();
} catch (IOException e) {
throw new RuntimeException(e);
}
};
int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT);
cluster.rollingRestart(new InternalTestCluster.RestartCallback());
final String targetIndex = "downsample-1h-" + sourceIndex;
startDownsampleTaskViaIlm(sourceIndex, targetIndex);
assertBusy(() -> assertTargetIndex(cluster, targetIndex, indexedDocs, samplingMethod));
ensureGreen(targetIndex);
// We wait for ILM to successfully complete the phase
logger.info("Waiting for ILM to complete the phase for index [{}]", targetIndex);
awaitClusterState(clusterState -> {
IndexMetadata indexMetadata = clusterState.metadata().getProject().index(targetIndex);
return indexMetadata.getLifecycleExecutionState() != null
&& Objects.equals(indexMetadata.getLifecycleExecutionState().step(), PhaseCompleteStep.NAME);
});
}
private void startDownsampleTaskViaIlm(String sourceIndex, String targetIndex) throws Exception {
var request = new UpdateSettingsRequest(sourceIndex).settings(
Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, POLICY_NAME)
);
// Updating index.lifecycle.name setting may fail due to the rolling restart itself,
// we need to attempt it in a assertBusy(...)
assertBusy(() -> {
try {
if (indexExists(sourceIndex) == false) {
logger.info("The source index [{}] no longer exists, downsampling likely completed", sourceIndex);
return;
}
client().admin().indices().updateSettings(request).actionGet(TimeValue.timeValueSeconds(10));
} catch (Exception e) {
logger.warn(() -> format("encountered failure while updating [%s] index's ilm policy", sourceIndex), e);
throw new AssertionError(e);
}
}, 1, TimeUnit.MINUTES);
awaitIndexExists(targetIndex, TimeValue.timeValueSeconds(60));
assertBusy(() -> {
var getSettingsResponse = client().admin()
.indices()
.getSettings(new GetSettingsRequest(TEST_REQUEST_TIMEOUT).indices(targetIndex))
.actionGet();
assertThat(getSettingsResponse.getSetting(targetIndex, IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey()), equalTo("success"));
}, 60, TimeUnit.SECONDS);
}
private void assertTargetIndex(
final InternalTestCluster cluster,
final String targetIndex,
int indexedDocs,
DownsampleConfig.SamplingMethod samplingMethod
) {
final GetIndexResponse getIndexResponse = cluster.client()
.admin()
.indices()
.getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(targetIndex))
.actionGet();
assertEquals(1, getIndexResponse.indices().length);
assertEquals(
getIndexResponse.getSetting(targetIndex, IndexMetadata.INDEX_DOWNSAMPLE_METHOD_KEY),
DownsampleConfig.SamplingMethod.getOrDefault(samplingMethod).toString()
);
assertResponse(
cluster.client()
.prepareSearch(targetIndex)
.setQuery(new MatchAllQueryBuilder())
.setSize(Math.min(DOC_COUNT, indexedDocs))
.setTrackTotalHitsUpTo(Integer.MAX_VALUE),
targetIndexSearch -> {
assertTrue(targetIndexSearch.getHits().getHits().length > 0);
}
);
}
}
| ILMDownsampleDisruptionIT |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTranslator.java | {
"start": 969,
"end": 3348
} | class ____ implements UpdateApiKeyRequestTranslator {
private static final RoleDescriptor.Parser ROLE_DESCRIPTOR_PARSER = RoleDescriptor.parserBuilder().allowRestriction(true).build();
private static final ConstructingObjectParser<Payload, Void> PARSER = createParser(ROLE_DESCRIPTOR_PARSER::parse);
@SuppressWarnings("unchecked")
protected static ConstructingObjectParser<Payload, Void> createParser(
CheckedBiFunction<String, XContentParser, RoleDescriptor, IOException> roleDescriptorParser
) {
final ConstructingObjectParser<Payload, Void> parser = new ConstructingObjectParser<>(
"update_api_key_request_payload",
a -> new Payload(
(List<RoleDescriptor>) a[0],
(Map<String, Object>) a[1],
TimeValue.parseTimeValue((String) a[2], null, "expiration")
)
);
parser.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> {
p.nextToken();
return roleDescriptorParser.apply(n, p);
}, new ParseField("role_descriptors"));
parser.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata"));
parser.declareString(optionalConstructorArg(), new ParseField("expiration"));
return parser;
}
@Override
public UpdateApiKeyRequest translate(RestRequest request) throws IOException {
// Note that we use `ids` here even though we only support a single ID. This is because the route where this translator is used
// shares a path prefix with `RestClearApiKeyCacheAction` and our current REST implementation requires that path params have the
// same wildcard if their paths share a prefix
final String apiKeyId = request.param("ids");
if (false == request.hasContent()) {
return UpdateApiKeyRequest.usingApiKeyId(apiKeyId);
}
final Payload payload = PARSER.parse(request.contentParser(), null);
return new UpdateApiKeyRequest(apiKeyId, payload.roleDescriptors, payload.metadata, payload.expiration);
}
protected record Payload(List<RoleDescriptor> roleDescriptors, Map<String, Object> metadata, TimeValue expiration) {}
}
}
| Default |
java | greenrobot__EventBus | EventBusTest/src/org/greenrobot/eventbus/EventBusAndroidMultithreadedTest.java | {
"start": 993,
"end": 1929
} | class ____ extends EventBusMultithreadedTest {
@Test
public void testSubscribeUnSubscribeAndPostMixedEventType() throws InterruptedException {
List<SubscribeUnsubscribeThread> threads = new ArrayList<SubscribeUnsubscribeThread>();
// Debug.startMethodTracing("testSubscribeUnSubscribeAndPostMixedEventType");
for (int i = 0; i < 5; i++) {
SubscribeUnsubscribeThread thread = new SubscribeUnsubscribeThread();
thread.start();
threads.add(thread);
}
// This test takes a bit longer, so just use fraction the regular count
runThreadsMixedEventType(COUNT / 4, 5);
for (SubscribeUnsubscribeThread thread : threads) {
thread.shutdown();
}
for (SubscribeUnsubscribeThread thread : threads) {
thread.join();
}
// Debug.stopMethodTracing();
}
public | EventBusAndroidMultithreadedTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/NoAllocationCheckerTest.java | {
"start": 18305,
"end": 20790
} | class ____ {
// Calling safe methods is fine.
@NoAllocation
public boolean comparison(int n) {
return n > 1;
}
@NoAllocation
public void callNoAllocationMethod() {
comparison(5);
}
@NoAllocation
@SuppressWarnings({"foo, bar"})
public void annotatedWithArray() {}
@NoAllocation
public boolean arrayComparison(int[] a) {
return a.length > 0 && a[0] > 1;
}
// Non string operations are fine.
@NoAllocation
public int sumInts(int a, int b) {
return a + b;
}
@NoAllocation
public int addOne(int a) {
a += 1;
return a;
}
// Foreach is allowed on arrays.
@NoAllocation
public int forEachArray(int[] a) {
int last = -1;
for (int i : a) {
last = i;
}
return last;
}
// Varargs is ok if no autoboxing occurs.
@NoAllocation
public int varArgsMethod2(int a, int... b) {
return a + b[0];
}
@NoAllocation
public void callVarArgsNoAllocation(int[] b) {
varArgsMethod2(1, b);
}
@NoAllocation
public Object varArgsMethodObject2(Object a, Object... b) {
return b[0];
}
@NoAllocation
public void callVarArgsObject2(Object a, Object[] b) {
varArgsMethodObject2(a, b);
}
// Unboxing is fine.
@NoAllocation
public void unboxByCalling(Integer i) {
comparison(i);
}
@NoAllocation
public int binaryUnbox(Integer a, int b) {
return a + b;
}
// We can call a non-annotated method if we suppress warnings.
@NoAllocation
@SuppressWarnings("NoAllocation")
public void trustMe() {
String s = new String();
}
@NoAllocation
public void trusting() {
trustMe();
}
// Allocations are allowed in a throw statement.
@NoAllocation
public void throwNew() {
throw new RuntimeException();
}
@NoAllocation
public void throwNewArray() {
throw new RuntimeException(Arrays.toString(new int[10]));
}
@NoAllocation
public void throwMethod() {
throw new RuntimeException(Integer.toString(5));
}
@NoAllocation
public void throwStringConcatenation() {
throw new RuntimeException("a" + 5);
}
@NoAllocation
public void throwStringConcatenation2() {
throw new RuntimeException("a" + Integer.toString(5));
}
@NoAllocation
public void throwStringConcatenation3() {
throw new RuntimeException("a" + getInt());
}
@NoAllocation
public String throwStringConvCompoundAssign(int i) {
String s = "";
throw new RuntimeException(s += i);
}
| NoAllocationCheckerNegativeCases |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractLinearCounting.java | {
"start": 1063,
"end": 2605
} | class ____ extends AbstractCardinalityAlgorithm {
private static final int P2 = 25;
public AbstractLinearCounting(int precision) {
super(precision);
}
/**
* Add encoded value to the linear counting. Implementor should only accept the value if it has not been
* seen before.
*/
protected abstract int addEncoded(long bucketOrd, int encoded);
/**
* number of values in the counter.
*/
protected abstract int size(long bucketOrd);
public int collect(long bucketOrd, long hash) {
final int k = encodeHash(hash, p);
return addEncoded(bucketOrd, k);
}
@Override
public long cardinality(long bucketOrd) {
final long m = 1 << P2;
final long v = m - size(bucketOrd);
return linearCounting(m, v);
}
static long mask(int bits) {
return (1L << bits) - 1;
}
/**
* Encode the hash on 32 bits. The encoded hash cannot be equal to <code>0</code>.
*/
static int encodeHash(long hash, int p) {
final long e = hash >>> (64 - P2);
final long encoded;
if ((e & mask(P2 - p)) == 0) {
final int runLen = 1 + Math.min(Long.numberOfLeadingZeros(hash << P2), 64 - P2);
encoded = (e << 7) | (runLen << 1) | 1;
} else {
encoded = e << 1;
}
assert PackedInts.bitsRequired(encoded) <= 32;
assert encoded != 0;
return (int) encoded;
}
/** Iterator over the hash values */
public | AbstractLinearCounting |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/annotation/MergedAnnotationsCollection.java | {
"start": 7505,
"end": 10550
} | class ____<A extends Annotation> implements Spliterator<MergedAnnotation<A>> {
private final @Nullable Object requiredType;
private final int[] mappingCursors;
public AnnotationsSpliterator(@Nullable Object requiredType) {
this.mappingCursors = new int[annotations.length];
this.requiredType = requiredType;
}
@Override
public boolean tryAdvance(Consumer<? super MergedAnnotation<A>> action) {
int lowestDistance = Integer.MAX_VALUE;
int annotationResult = -1;
for (int annotationIndex = 0; annotationIndex < annotations.length; annotationIndex++) {
AnnotationTypeMapping mapping = getNextSuitableMapping(annotationIndex);
if (mapping != null && mapping.getDistance() < lowestDistance) {
annotationResult = annotationIndex;
lowestDistance = mapping.getDistance();
}
if (lowestDistance == 0) {
break;
}
}
if (annotationResult != -1) {
MergedAnnotation<A> mergedAnnotation = createMergedAnnotationIfPossible(
annotationResult, this.mappingCursors[annotationResult]);
this.mappingCursors[annotationResult]++;
if (mergedAnnotation == null) {
return tryAdvance(action);
}
action.accept(mergedAnnotation);
return true;
}
return false;
}
private @Nullable AnnotationTypeMapping getNextSuitableMapping(int annotationIndex) {
AnnotationTypeMapping mapping;
do {
mapping = getMapping(annotationIndex, this.mappingCursors[annotationIndex]);
if (mapping != null && isMappingForType(mapping, this.requiredType)) {
return mapping;
}
this.mappingCursors[annotationIndex]++;
}
while (mapping != null);
return null;
}
private @Nullable AnnotationTypeMapping getMapping(int annotationIndex, int mappingIndex) {
AnnotationTypeMappings mappings = MergedAnnotationsCollection.this.mappings[annotationIndex];
return (mappingIndex < mappings.size() ? mappings.get(mappingIndex) : null);
}
@SuppressWarnings("unchecked")
private @Nullable MergedAnnotation<A> createMergedAnnotationIfPossible(int annotationIndex, int mappingIndex) {
MergedAnnotation<?> root = annotations[annotationIndex];
if (mappingIndex == 0) {
return (MergedAnnotation<A>) root;
}
IntrospectionFailureLogger logger = (this.requiredType != null ?
IntrospectionFailureLogger.INFO : IntrospectionFailureLogger.DEBUG);
return TypeMappedAnnotation.createIfPossible(
mappings[annotationIndex].get(mappingIndex), root, logger);
}
@Override
public @Nullable Spliterator<MergedAnnotation<A>> trySplit() {
return null;
}
@Override
public long estimateSize() {
int size = 0;
for (int i = 0; i < annotations.length; i++) {
AnnotationTypeMappings mappings = MergedAnnotationsCollection.this.mappings[i];
int numberOfMappings = mappings.size();
numberOfMappings -= Math.min(this.mappingCursors[i], mappings.size());
size += numberOfMappings;
}
return size;
}
@Override
public int characteristics() {
return NONNULL | IMMUTABLE;
}
}
}
| AnnotationsSpliterator |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestContainerLogsUtils.java | {
"start": 1959,
"end": 6586
} | class ____ {
private TestContainerLogsUtils() {}
/**
* Utility function to create container log file and upload
* it into remote file system.
* @param conf the configuration
* @param fs the FileSystem
* @param rootLogDir the root log directory
* @param appId the application id
* @param containerToContent mapping between container id and its content
* @param nodeId the nodeId
* @param fileName the log file name
* @param user the application user
* @param deleteRemoteLogDir whether to delete remote log dir.
* @throws IOException if we can not create log files locally
* or we can not upload container logs into RemoteFS.
*/
public static void createContainerLogFileInRemoteFS(Configuration conf,
FileSystem fs, String rootLogDir, ApplicationId appId,
Map<ContainerId, String> containerToContent, NodeId nodeId,
String fileName, String user, boolean deleteRemoteLogDir)
throws Exception {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
// create local logs
List<String> rootLogDirList = new ArrayList<String>();
rootLogDirList.add(rootLogDir);
Path rootLogDirPath = new Path(rootLogDir);
if (fs.exists(rootLogDirPath)) {
fs.delete(rootLogDirPath, true);
}
fs.mkdirs(rootLogDirPath);
// Make sure the target dir is created. If not, FileNotFoundException is thrown
fs.getFileStatus(rootLogDirPath);
Path appLogsDir = new Path(rootLogDirPath, appId.toString());
if (fs.exists(appLogsDir)) {
fs.delete(appLogsDir, true);
}
fs.mkdirs(appLogsDir);
// Make sure the target dir is created. If not, FileNotFoundException is thrown
fs.getFileStatus(appLogsDir);
createContainerLogInLocalDir(appLogsDir, containerToContent, fs, fileName);
// upload container logs to remote log dir
LogAggregationFileControllerFactory factory =
new LogAggregationFileControllerFactory(conf);
LogAggregationFileController fileController =
factory.getFileControllerForWrite();
Path path = fileController.getRemoteAppLogDir(appId, user);
if (fs.exists(path) && deleteRemoteLogDir) {
fs.delete(path, true);
}
fs.mkdirs(path);
// Make sure the target dir is created. If not, FileNotFoundException is thrown
fs.getFileStatus(path);
uploadContainerLogIntoRemoteDir(ugi, conf, rootLogDirList, nodeId, appId,
containerToContent.keySet(), path);
}
private static void createContainerLogInLocalDir(Path appLogsDir,
Map<ContainerId, String> containerToContent, FileSystem fs,
String fileName) throws IOException {
for (Map.Entry<ContainerId, String> containerAndContent :
containerToContent.entrySet()) {
ContainerId containerId = containerAndContent.getKey();
String content = containerAndContent.getValue();
Path containerLogsDir = new Path(appLogsDir, containerId.toString());
if (fs.exists(containerLogsDir)) {
fs.delete(containerLogsDir, true);
}
fs.mkdirs(containerLogsDir);
// Make sure the target dir is created. If not, FileNotFoundException is thrown
fs.getFileStatus(containerLogsDir);
Writer writer =
new FileWriter(new File(containerLogsDir.toString(), fileName));
writer.write(content);
writer.close();
}
}
private static void uploadContainerLogIntoRemoteDir(UserGroupInformation ugi,
Configuration configuration, List<String> rootLogDirs, NodeId nodeId,
ApplicationId appId, Iterable<ContainerId> containerIds, Path appDir)
throws Exception {
Path path =
new Path(appDir, LogAggregationUtils.getNodeString(nodeId));
LogAggregationFileControllerFactory factory
= new LogAggregationFileControllerFactory(configuration);
LogAggregationFileController fileController = factory
.getFileControllerForWrite();
try {
Map<ApplicationAccessType, String> appAcls = new HashMap<>();
appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
LogAggregationFileControllerContext context
= new LogAggregationFileControllerContext(
path, path, true, 1000,
appId, appAcls, nodeId, ugi);
fileController.initializeWriter(context);
for (ContainerId containerId : containerIds) {
fileController.write(new AggregatedLogFormat.LogKey(containerId),
new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
ugi.getShortUserName()));
}
} finally {
fileController.closeWriter();
}
}
}
| TestContainerLogsUtils |
java | google__guava | android/guava-tests/test/com/google/common/cache/CacheBuilderFactory.java | {
"start": 1280,
"end": 1434
} | class ____ creating {@link CacheBuilder} instances with all combinations of several sets of
* parameters.
*
* @author mike nonemacher
*/
@NullUnmarked
| for |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/ConfigurationClassAndBeanMethodTests.java | {
"start": 1334,
"end": 5422
} | class ____ {
@Test
void verifyEquals() throws Exception {
ConfigurationClass configurationClass1 = newConfigurationClass(Config1.class);
ConfigurationClass configurationClass2 = newConfigurationClass(Config1.class);
ConfigurationClass configurationClass3 = newConfigurationClass(Config2.class);
assertThat(configurationClass1).isNotEqualTo(null);
assertThat(configurationClass1).isNotSameAs(configurationClass2);
assertThat(configurationClass1).isEqualTo(configurationClass1);
assertThat(configurationClass2).isEqualTo(configurationClass2);
assertThat(configurationClass1).isEqualTo(configurationClass2);
assertThat(configurationClass2).isEqualTo(configurationClass1);
assertThat(configurationClass1).isNotEqualTo(configurationClass3);
assertThat(configurationClass3).isNotEqualTo(configurationClass2);
// ---------------------------------------------------------------------
List<BeanMethod> beanMethods1 = getBeanMethods(configurationClass1);
BeanMethod beanMethod_1_0 = beanMethods1.get(0);
BeanMethod beanMethod_1_1 = beanMethods1.get(1);
BeanMethod beanMethod_1_2 = beanMethods1.get(2);
List<BeanMethod> beanMethods2 = getBeanMethods(configurationClass2);
BeanMethod beanMethod_2_0 = beanMethods2.get(0);
BeanMethod beanMethod_2_1 = beanMethods2.get(1);
BeanMethod beanMethod_2_2 = beanMethods2.get(2);
List<BeanMethod> beanMethods3 = getBeanMethods(configurationClass3);
BeanMethod beanMethod_3_0 = beanMethods3.get(0);
BeanMethod beanMethod_3_1 = beanMethods3.get(1);
BeanMethod beanMethod_3_2 = beanMethods3.get(2);
assertThat(beanMethod_1_0).isNotEqualTo(null);
assertThat(beanMethod_1_0).isNotSameAs(beanMethod_2_0);
assertThat(beanMethod_1_0).isEqualTo(beanMethod_1_0);
assertThat(beanMethod_1_0).isEqualTo(beanMethod_2_0);
assertThat(beanMethod_1_1).isEqualTo(beanMethod_2_1);
assertThat(beanMethod_1_2).isEqualTo(beanMethod_2_2);
assertThat(beanMethod_1_0.getMetadata().getMethodName()).isEqualTo(beanMethod_3_0.getMetadata().getMethodName());
assertThat(beanMethod_1_0).isNotEqualTo(beanMethod_3_0);
assertThat(beanMethod_1_1).isNotEqualTo(beanMethod_3_1);
assertThat(beanMethod_1_2).isNotEqualTo(beanMethod_3_2);
}
@Test
void verifyHashCode() throws Exception {
ConfigurationClass configurationClass1 = newConfigurationClass(Config1.class);
ConfigurationClass configurationClass2 = newConfigurationClass(Config1.class);
ConfigurationClass configurationClass3 = newConfigurationClass(Config2.class);
assertThat(configurationClass1).hasSameHashCodeAs(configurationClass2);
assertThat(configurationClass1).doesNotHaveSameHashCodeAs(configurationClass3);
// ---------------------------------------------------------------------
List<BeanMethod> beanMethods1 = getBeanMethods(configurationClass1);
BeanMethod beanMethod_1_0 = beanMethods1.get(0);
BeanMethod beanMethod_1_1 = beanMethods1.get(1);
BeanMethod beanMethod_1_2 = beanMethods1.get(2);
List<BeanMethod> beanMethods2 = getBeanMethods(configurationClass2);
BeanMethod beanMethod_2_0 = beanMethods2.get(0);
BeanMethod beanMethod_2_1 = beanMethods2.get(1);
BeanMethod beanMethod_2_2 = beanMethods2.get(2);
List<BeanMethod> beanMethods3 = getBeanMethods(configurationClass3);
BeanMethod beanMethod_3_0 = beanMethods3.get(0);
BeanMethod beanMethod_3_1 = beanMethods3.get(1);
BeanMethod beanMethod_3_2 = beanMethods3.get(2);
assertThat(beanMethod_1_0).hasSameHashCodeAs(beanMethod_2_0);
assertThat(beanMethod_1_1).hasSameHashCodeAs(beanMethod_2_1);
assertThat(beanMethod_1_2).hasSameHashCodeAs(beanMethod_2_2);
assertThat(beanMethod_1_0).doesNotHaveSameHashCodeAs(beanMethod_3_0);
assertThat(beanMethod_1_1).doesNotHaveSameHashCodeAs(beanMethod_3_1);
assertThat(beanMethod_1_2).doesNotHaveSameHashCodeAs(beanMethod_3_2);
}
@Test
void verifyToString() throws Exception {
ConfigurationClass configurationClass = newConfigurationClass(Config1.class);
assertThat(configurationClass.toString())
.startsWith("ConfigurationClass: beanName 'Config1', | ConfigurationClassAndBeanMethodTests |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/abilities/SupportsPartitionPushDown.java | {
"start": 2020,
"end": 2807
} | interface ____ not implemented, the data is read entirely with a subsequent
* filter operation after the source.
*
* <p>For efficiency, the planner can pass the number of required partitions and a source must
* exclude those partitions from reading (including reading the metadata). See {@link
* #applyPartitions(List)}.
*
* <p>By default, the list of all partitions is queried from the catalog if necessary. However,
* depending on the external system, it might be necessary to query the list of partitions in a
* connector-specific way instead of using the catalog information. See {@link #listPartitions()}.
*
* <p>Note: After partitions are pushed into the source, the runtime will not perform a subsequent
* filter operation for partition keys.
*/
@PublicEvolving
public | is |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/version/VersionUtils.java | {
"start": 972,
"end": 2346
} | class ____ {
private static final Properties VERSIONS = new Properties();
// This block needs to be here, so it is executed before MICRONAUT_VERSION
static {
URL resource = VersionUtils.class.getResource("/micronaut-version.properties");
if (resource != null) {
try (Reader reader = new InputStreamReader(resource.openStream(), StandardCharsets.UTF_8)) {
VERSIONS.load(reader);
} catch (IOException e) {
// ignore
}
}
}
/**
* The current version of Micronaut.
*/
@SuppressWarnings("DeclarationOrder")
public static final String MICRONAUT_VERSION = getMicronautVersion();
/**
* Return whether the current version of Micronaut is at least the given version using semantic rules.
*
* @param requiredVersion The required version
* @return True if it is
*/
public static boolean isAtLeastMicronautVersion(String requiredVersion) {
return MICRONAUT_VERSION == null || SemanticVersion.isAtLeast(MICRONAUT_VERSION, requiredVersion);
}
@Nullable
public static String getMicronautVersion() {
Object micronautVersion = VERSIONS.get("micronaut.version");
if (micronautVersion != null) {
return micronautVersion.toString();
}
return null;
}
}
| VersionUtils |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/TestingJobLeaderIdService.java | {
"start": 3688,
"end": 6227
} | class ____ {
private Consumer<JobLeaderIdActions> startConsumer = ignored -> {};
private Runnable stopRunnable = () -> {};
private Runnable clearRunnable = () -> {};
private Consumer<JobID> addJobConsumer = ignored -> {};
private Consumer<JobID> removeJobConsumer = ignored -> {};
private Function<JobID, Boolean> containsJobFunction = ignored -> false;
private Function<JobID, CompletableFuture<JobMasterId>> getLeaderIdFunction =
ignored -> new CompletableFuture<>();
private BiFunction<JobID, UUID, Boolean> isValidTimeoutFunction =
(ignoredA, ignoredB) -> false;
public Builder setStartConsumer(Consumer<JobLeaderIdActions> startConsumer) {
this.startConsumer = startConsumer;
return this;
}
public Builder setStopRunnable(Runnable stopRunnable) {
this.stopRunnable = stopRunnable;
return this;
}
public Builder setClearRunnable(Runnable clearRunnable) {
this.clearRunnable = clearRunnable;
return this;
}
public Builder setAddJobConsumer(Consumer<JobID> addJobConsumer) {
this.addJobConsumer = addJobConsumer;
return this;
}
public Builder setRemoveJobConsumer(Consumer<JobID> removeJobConsumer) {
this.removeJobConsumer = removeJobConsumer;
return this;
}
public Builder setContainsJobFunction(Function<JobID, Boolean> containsJobFunction) {
this.containsJobFunction = containsJobFunction;
return this;
}
public Builder setGetLeaderIdFunction(
Function<JobID, CompletableFuture<JobMasterId>> getLeaderIdFunction) {
this.getLeaderIdFunction = getLeaderIdFunction;
return this;
}
public Builder setIsValidTimeoutFunction(
BiFunction<JobID, UUID, Boolean> isValidTimeoutFunction) {
this.isValidTimeoutFunction = isValidTimeoutFunction;
return this;
}
public TestingJobLeaderIdService build() {
return new TestingJobLeaderIdService(
startConsumer,
stopRunnable,
clearRunnable,
addJobConsumer,
removeJobConsumer,
containsJobFunction,
getLeaderIdFunction,
isValidTimeoutFunction);
}
}
}
| Builder |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleMatchers.java | {
"start": 6085,
"end": 8918
} | class ____ implements MappingRuleMatcher {
/**
* The list of matchers to be checked during evaluation.
*/
private MappingRuleMatcher[] matchers;
/**
* Constructor.
* @param matchers List of matchers to be checked during evaluation
*/
OrMatcher(MappingRuleMatcher...matchers) {
this.matchers = matchers;
}
/**
* This match method will go through all the provided matchers and call
* their match method, if any of them match we return true.
* @param variables The variable context, which contains all the variables
* @return true if any of the matchers match
*/
@Override
public boolean match(VariableContext variables) {
for (MappingRuleMatcher matcher : matchers) {
if (matcher.match(variables)) {
return true;
}
}
return false;
}
@Override
public String toString() {
return "OrMatcher{" +
"matchers=" + Arrays.toString(matchers) +
'}';
}
}
/**
* Convenience method to create a variable matcher which matches against the
* username.
* @param userName The username to be matched
* @return VariableMatcher with %user as the variable
*/
public static MappingRuleMatcher createUserMatcher(String userName) {
return new VariableMatcher("%user", userName);
}
/**
* Convenience method to create a group matcher which matches against the
* groups of the user.
* @param groupName The groupName to be matched
* @return UserGroupMatcher
*/
public static MappingRuleMatcher createUserGroupMatcher(String groupName) {
return new UserGroupMatcher(groupName);
}
/**
* Convenience method to create a composite matcher which matches against the
* user's user name and the user's primary group. Only matches if both
* matches.
* @param userName The username to be matched
* @param groupName The groupName to be matched
* @return AndMatcher with two matchers one for userName and one for
* primaryGroup
*/
public static MappingRuleMatcher createUserGroupMatcher(
String userName, String groupName) {
return new AndMatcher(
createUserMatcher(userName),
createUserGroupMatcher(groupName));
}
/**
* Convenience method to create a variable matcher which matches against the
* submitted application's name.
* @param name The name to be matched
* @return VariableMatcher with %application as the variable
*/
public static MappingRuleMatcher createApplicationNameMatcher(String name) {
return new VariableMatcher("%application", name);
}
/**
* Convenience method to create a matcher that matches all
* @return MatchAllMatcher.
*/
public static MappingRuleMatcher createAllMatcher() {
return new MatchAllMatcher();
}
}
| OrMatcher |
java | apache__flink | flink-metrics/flink-metrics-core/src/main/java/org/apache/flink/metrics/MeterView.java | {
"start": 1763,
"end": 3893
} | class ____ implements Meter, View {
private static final int DEFAULT_TIME_SPAN_IN_SECONDS = 60;
/** The underlying counter maintaining the count. */
private final Counter counter;
/** The time-span over which the average is calculated. */
private final int timeSpanInSeconds;
/** Circular array containing the history of values. */
private final long[] values;
/** The index in the array for the current time. */
private int time = 0;
/** The last rate we computed. */
private double currentRate = 0;
public MeterView(int timeSpanInSeconds) {
this(new SimpleCounter(), timeSpanInSeconds);
}
public MeterView(Counter counter) {
this(counter, DEFAULT_TIME_SPAN_IN_SECONDS);
}
public MeterView(Counter counter, int timeSpanInSeconds) {
this.counter = counter;
// the time-span must be larger than the update-interval as otherwise the array has a size
// of 1,
// for which no rate can be computed as no distinct before/after measurement exists.
this.timeSpanInSeconds =
Math.max(
timeSpanInSeconds - (timeSpanInSeconds % UPDATE_INTERVAL_SECONDS),
UPDATE_INTERVAL_SECONDS);
this.values = new long[this.timeSpanInSeconds / UPDATE_INTERVAL_SECONDS + 1];
}
public MeterView(Gauge<? extends Number> numberGauge) {
this(new GaugeWrapper(numberGauge));
}
@Override
public void markEvent() {
this.counter.inc();
}
@Override
public void markEvent(long n) {
this.counter.inc(n);
}
@Override
public long getCount() {
return counter.getCount();
}
@Override
public double getRate() {
return currentRate;
}
@Override
public void update() {
time = (time + 1) % values.length;
values[time] = counter.getCount();
currentRate =
((double) (values[time] - values[(time + 1) % values.length]) / timeSpanInSeconds);
}
/** Simple wrapper to expose number gauges as timers. */
static | MeterView |
java | hibernate__hibernate-orm | hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDBLegacyDialect.java | {
"start": 2587,
"end": 10532
} | class ____ extends MySQLLegacyDialect {
private static final DatabaseVersion VERSION5 = DatabaseVersion.make( 5 );
private static final DatabaseVersion VERSION57 = DatabaseVersion.make( 5, 7 );
public MariaDBLegacyDialect() {
this( DatabaseVersion.make( 5 ) );
}
public MariaDBLegacyDialect(DatabaseVersion version) {
super(version);
}
public MariaDBLegacyDialect(DialectResolutionInfo info) {
super( createVersion( info ), MySQLServerConfiguration.fromDialectResolutionInfo( info ) );
registerKeywords( info );
}
protected LockingSupport buildLockingSupport() {
return new MariaDBLockingSupport( getVersion() );
}
@Override
public DatabaseVersion getMySQLVersion() {
return getVersion().isBefore( 5, 3 )
? VERSION5
: VERSION57;
}
@Override
public NationalizationSupport getNationalizationSupport() {
return NationalizationSupport.IMPLICIT;
}
@Override
public void initializeFunctionRegistry(FunctionContributions functionContributions) {
super.initializeFunctionRegistry(functionContributions);
if ( getVersion().isSameOrAfter( 10, 2 ) ) {
CommonFunctionFactory commonFunctionFactory = new CommonFunctionFactory(functionContributions);
commonFunctionFactory.windowFunctions();
commonFunctionFactory.hypotheticalOrderedSetAggregates_windowEmulation();
functionContributions.getFunctionRegistry().registerNamed(
"json_valid",
functionContributions.getTypeConfiguration()
.getBasicTypeRegistry()
.resolve( StandardBasicTypes.BOOLEAN )
);
commonFunctionFactory.jsonValue_mariadb();
commonFunctionFactory.jsonArray_mariadb();
commonFunctionFactory.jsonQuery_mariadb();
commonFunctionFactory.jsonArrayAgg_mariadb();
commonFunctionFactory.jsonObjectAgg_mariadb();
commonFunctionFactory.jsonArrayAppend_mariadb();
if ( getVersion().isSameOrAfter( 10, 3, 3 ) ) {
if ( getVersion().isSameOrAfter( 10, 6 ) ) {
commonFunctionFactory.unnest_emulated();
commonFunctionFactory.jsonTable_mysql();
}
commonFunctionFactory.inverseDistributionOrderedSetAggregates_windowEmulation();
functionContributions.getFunctionRegistry().patternDescriptorBuilder( "median", "median(?1) over ()" )
.setInvariantType( functionContributions.getTypeConfiguration().getBasicTypeRegistry().resolve( StandardBasicTypes.DOUBLE ) )
.setExactArgumentCount( 1 )
.setParameterTypes(NUMERIC)
.register();
}
}
}
@Override
protected void registerColumnTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) {
super.registerColumnTypes( typeContributions, serviceRegistry );
final DdlTypeRegistry ddlTypeRegistry = typeContributions.getTypeConfiguration().getDdlTypeRegistry();
if ( getVersion().isSameOrAfter( 10, 7 ) ) {
ddlTypeRegistry.addDescriptor( new DdlTypeImpl( UUID, "uuid", this ) );
}
}
@Override
public AggregateSupport getAggregateSupport() {
return getVersion().isSameOrAfter( 10, 2 )
? MySQLAggregateSupport.forMariaDB( this )
: AggregateSupportImpl.INSTANCE;
}
@Override
public JdbcType resolveSqlTypeDescriptor(
String columnTypeName,
int jdbcTypeCode,
int precision,
int scale,
JdbcTypeRegistry jdbcTypeRegistry) {
switch ( jdbcTypeCode ) {
case OTHER:
switch ( columnTypeName ) {
case "uuid":
jdbcTypeCode = UUID;
break;
}
break;
case VARBINARY:
if ( "GEOMETRY".equals( columnTypeName ) ) {
jdbcTypeCode = GEOMETRY;
}
break;
}
return super.resolveSqlTypeDescriptor( columnTypeName, jdbcTypeCode, precision, scale, jdbcTypeRegistry );
}
@Override
public void contributeTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) {
final JdbcTypeRegistry jdbcTypeRegistry = typeContributions.getTypeConfiguration().getJdbcTypeRegistry();
// Make sure we register the JSON type descriptor before calling super, because MariaDB does not need casting
jdbcTypeRegistry.addDescriptorIfAbsent( SqlTypes.JSON, MariaDBCastingJsonJdbcType.INSTANCE );
jdbcTypeRegistry.addTypeConstructorIfAbsent( MariaDBCastingJsonArrayJdbcTypeConstructor.INSTANCE );
super.contributeTypes( typeContributions, serviceRegistry );
if ( getVersion().isSameOrAfter( 10, 7 ) ) {
jdbcTypeRegistry.addDescriptorIfAbsent( VarcharUUIDJdbcType.INSTANCE );
}
}
@Override
public String castPattern(CastType from, CastType to) {
return to == CastType.JSON
? "json_extract(?1,'$')"
: super.castPattern( from, to );
}
@Override
public SqlAstTranslatorFactory getSqlAstTranslatorFactory() {
return new StandardSqlAstTranslatorFactory() {
@Override
protected <T extends JdbcOperation> SqlAstTranslator<T> buildTranslator(
SessionFactoryImplementor sessionFactory, Statement statement) {
return new MariaDBLegacySqlAstTranslator<>( sessionFactory, statement, MariaDBLegacyDialect.this );
}
};
}
@Override
public boolean supportsWindowFunctions() {
return getVersion().isSameOrAfter( 10, 2 );
}
@Override
public boolean supportsLateral() {
// See https://jira.mariadb.org/browse/MDEV-19078
return false;
}
@Override
public boolean supportsRecursiveCTE() {
return getVersion().isSameOrAfter( 10, 2 );
}
@Override
public boolean supportsColumnCheck() {
return getVersion().isSameOrAfter( 10, 2 );
}
@Override
public boolean supportsNamedColumnCheck() {
return false;
}
@Override
public boolean doesRoundTemporalOnOverflow() {
// See https://jira.mariadb.org/browse/MDEV-16991
return false;
}
@Override
protected MySQLStorageEngine getDefaultMySQLStorageEngine() {
return InnoDBStorageEngine.INSTANCE;
}
@Override
public boolean supportsIfExistsBeforeConstraintName() {
return getVersion().isSameOrAfter( 10 );
}
@Override
public boolean supportsIfExistsAfterAlterTable() {
return getVersion().isSameOrAfter( 10, 5 );
}
@Override
public SequenceSupport getSequenceSupport() {
return getVersion().isBefore( 10, 3 )
? super.getSequenceSupport()
: MariaDBSequenceSupport.INSTANCE;
}
@Override
public String getQuerySequencesString() {
return getSequenceSupport().supportsSequences()
? "select table_name from information_schema.TABLES where table_schema=database() and table_type='SEQUENCE'"
: super.getQuerySequencesString(); //fancy way to write "null"
}
@Override
public SequenceInformationExtractor getSequenceInformationExtractor() {
return getSequenceSupport().supportsSequences()
? SequenceInformationExtractorMariaDBDatabaseImpl.INSTANCE
: super.getSequenceInformationExtractor();
}
@Override
boolean supportsForShare() {
//only supported on MySQL
return false;
}
@Override
boolean supportsAliasLocks() {
//only supported on MySQL
return false;
}
@Override
public FunctionalDependencyAnalysisSupport getFunctionalDependencyAnalysisSupport() {
return FunctionalDependencyAnalysisSupportImpl.TABLE_GROUP_AND_CONSTANTS;
}
@Override
public IdentifierHelper buildIdentifierHelper(IdentifierHelperBuilder builder, DatabaseMetaData metadata)
throws SQLException {
// some MariaDB drivers does not return case strategy info
builder.setUnquotedCaseStrategy( IdentifierCaseStrategy.MIXED );
builder.setQuotedCaseStrategy( IdentifierCaseStrategy.MIXED );
return super.buildIdentifierHelper( builder, metadata );
}
@Override
public String getDual() {
return "dual";
}
@Override
public String getFromDualForSelectOnly() {
return getVersion().isBefore( 10, 4 ) ? ( " from " + getDual() ) : "";
}
@Override
public boolean supportsIntersect() {
return getVersion().isSameOrAfter( 10, 3 );
}
@Override
public boolean supportsSimpleQueryGrouping() {
return getVersion().isSameOrAfter( 10, 4 );
}
@Override
public boolean supportsWithClause() {
return getVersion().isSameOrAfter( 10, 2 );
}
@Override
public boolean supportsWithClauseInSubquery() {
return false;
}
}
| MariaDBLegacyDialect |
java | alibaba__nacos | client-basic/src/test/java/com/alibaba/nacos/client/auth/ram/RamClientAuthServiceImplTest.java | {
"start": 1607,
"end": 5332
} | class ____ {
private static final String MOCK = "mock";
@Mock
private AbstractResourceInjector mockResourceInjector;
private RamClientAuthServiceImpl ramClientAuthService;
private Properties akSkProperties;
private Properties roleProperties;
private RamContext ramContext;
private RequestResource resource;
@BeforeEach
void setUp() throws Exception {
ramClientAuthService = new RamClientAuthServiceImpl();
Map<String, AbstractResourceInjector> resourceInjectors = (Map<String, AbstractResourceInjector>) ReflectUtils.getFieldValue(
ramClientAuthService, "resourceInjectors");
resourceInjectors.clear();
resourceInjectors.put(MOCK, mockResourceInjector);
ramContext = (RamContext) ReflectUtils.getFieldValue(ramClientAuthService, "ramContext");
akSkProperties = new Properties();
roleProperties = new Properties();
akSkProperties.setProperty(PropertyKeyConst.ACCESS_KEY, PropertyKeyConst.ACCESS_KEY);
akSkProperties.setProperty(PropertyKeyConst.SECRET_KEY, PropertyKeyConst.SECRET_KEY);
roleProperties.setProperty(PropertyKeyConst.RAM_ROLE_NAME, PropertyKeyConst.RAM_ROLE_NAME);
resource = new RequestResource();
}
@AfterEach
void tearDown() throws NacosException {
ramClientAuthService.shutdown();
}
@Test
void testLoginWithAkSk() {
assertTrue(ramClientAuthService.login(akSkProperties));
assertEquals(PropertyKeyConst.ACCESS_KEY, ramContext.getAccessKey());
assertEquals(PropertyKeyConst.SECRET_KEY, ramContext.getSecretKey());
assertNull(ramContext.getRamRoleName());
assertTrue(ramClientAuthService.login(roleProperties));
assertEquals(PropertyKeyConst.ACCESS_KEY, ramContext.getAccessKey());
assertEquals(PropertyKeyConst.SECRET_KEY, ramContext.getSecretKey());
assertNull(ramContext.getRamRoleName());
}
@Test
void testLoginWithRoleName() {
assertTrue(ramClientAuthService.login(roleProperties));
assertNull(ramContext.getAccessKey(), PropertyKeyConst.ACCESS_KEY);
assertNull(ramContext.getSecretKey(), PropertyKeyConst.SECRET_KEY);
assertEquals(PropertyKeyConst.RAM_ROLE_NAME, ramContext.getRamRoleName());
assertTrue(ramClientAuthService.login(akSkProperties));
assertNull(ramContext.getAccessKey(), PropertyKeyConst.ACCESS_KEY);
assertNull(ramContext.getSecretKey(), PropertyKeyConst.SECRET_KEY);
assertEquals(PropertyKeyConst.RAM_ROLE_NAME, ramContext.getRamRoleName());
}
@Test
void testGetLoginIdentityContextWithoutLogin() {
LoginIdentityContext actual = ramClientAuthService.getLoginIdentityContext(resource);
assertTrue(actual.getAllKey().isEmpty());
verify(mockResourceInjector, never()).doInject(resource, ramContext, actual);
}
@Test
void testGetLoginIdentityContextWithoutInjector() {
ramClientAuthService.login(akSkProperties);
LoginIdentityContext actual = ramClientAuthService.getLoginIdentityContext(resource);
assertTrue(actual.getAllKey().isEmpty());
verify(mockResourceInjector, never()).doInject(resource, ramContext, actual);
}
@Test
void testGetLoginIdentityContextWithInjector() {
ramClientAuthService.login(akSkProperties);
resource.setType(MOCK);
LoginIdentityContext actual = ramClientAuthService.getLoginIdentityContext(resource);
assertTrue(actual.getAllKey().isEmpty());
verify(mockResourceInjector).doInject(resource, ramContext, actual);
}
}
| RamClientAuthServiceImplTest |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java | {
"start": 1502,
"end": 2741
} | class ____ extends AcknowledgedRequest<Request> {
private final String id;
private final Instant from;
public Request(String id, Instant from, TimeValue timeout) {
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, timeout);
this.id = ExceptionsHelper.requireNonNull(id, TransformField.ID.getPreferredName());
this.from = from;
}
public Request(StreamInput in) throws IOException {
super(in);
id = in.readString();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
from = in.readOptionalInstant();
} else {
from = null;
}
}
public String getId() {
return id;
}
public Instant from() {
return from;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(id);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
out.writeOptionalInstant(from);
}
}
@Override
public int hashCode() {
// the base | Request |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-http12/src/main/java/org/apache/dubbo/remoting/http12/h2/Http2CancelableStreamObserver.java | {
"start": 959,
"end": 1197
} | interface ____<T> extends StreamObserver<T> {
void setCancellationContext(CancellationContext cancellationContext);
CancellationContext getCancellationContext();
void cancel(Throwable throwable);
}
| Http2CancelableStreamObserver |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/http/server/reactive/AsyncIntegrationTests.java | {
"start": 1979,
"end": 2439
} | class ____ implements HttpHandler {
@Override
@SuppressWarnings("deprecation")
public Mono<Void> handle(ServerHttpRequest request, ServerHttpResponse response) {
return response.writeWith(Flux.just("h", "e", "l", "l", "o")
.delayElements(Duration.ofMillis(100))
.publishOn(asyncGroup)
.collect(DefaultDataBufferFactory.sharedInstance::allocateBuffer,
(buffer, str) -> buffer.write(str.getBytes())));
}
}
}
| AsyncHandler |
java | apache__maven | impl/maven-cli/src/main/java/org/apache/maven/cling/ClingSupport.java | {
"start": 1480,
"end": 3650
} | class ____ {
static final String CORE_CLASS_REALM_ID = "plexus.core";
protected final ClassWorld classWorld;
protected final boolean classWorldManaged;
/**
* Ctor that creates "managed" ClassWorld. This constructor is not used in "normal" circumstances.
*/
public ClingSupport() {
this(new ClassWorld(CORE_CLASS_REALM_ID, Thread.currentThread().getContextClassLoader()), true);
}
/**
* Ctor to be used when running in ClassWorlds Launcher.
*/
public ClingSupport(ClassWorld classWorld) {
this(classWorld, false);
}
private ClingSupport(ClassWorld classWorld, boolean classWorldManaged) {
this.classWorld = requireNonNull(classWorld);
this.classWorldManaged = classWorldManaged;
}
/**
* The main entry point.
*/
public int run(
String[] args,
@Nullable InputStream stdIn,
@Nullable OutputStream stdOut,
@Nullable OutputStream stdErr,
boolean embedded)
throws IOException {
try (Invoker invoker = createInvoker()) {
return invoker.invoke(createParser()
.parseInvocation(createParserRequestBuilder(args)
.stdIn(stdIn)
.stdOut(stdOut)
.stdErr(stdErr)
.embedded(embedded)
.build()));
} catch (InvokerException.ExitException e) {
return e.getExitCode();
} catch (Exception e) {
// last resort; as ideally we should get ExitException only
new SystemLogger(stdErr).error(e.getMessage(), e);
return 1;
} finally {
if (classWorldManaged) {
classWorld.close();
}
}
}
protected MessageBuilderFactory createMessageBuilderFactory() {
return new JLineMessageBuilderFactory();
}
protected abstract Invoker createInvoker();
protected abstract Parser createParser();
protected abstract ParserRequest.Builder createParserRequestBuilder(String[] args);
}
| ClingSupport |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryMethodInvocationMatcherTest.java | {
"start": 4674,
"end": 5165
} | class ____ {
private static final Matcher<StatementTree> TARGETED =
expressionStatement(
methodInvocation(
instanceMethod().onDescendantOfAny("java.lang.Class", "java.lang.String")));
}
""")
.addOutputLines(
"Test.java",
"""
import static com.google.errorprone.matchers.Matchers.*;
import com.google.errorprone.matchers.Matcher;
import com.sun.source.tree.StatementTree;
public | Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/onetoone/OneToOneInXmlTests.java | {
"start": 584,
"end": 1458
} | class ____ {
@Test
@JiraKey( "HHH-4606" )
@DomainModel( xmlMappings = "org/hibernate/orm/test/annotations/onetoone/orm.xml" )
@SuppressWarnings("JUnitMalformedDeclaration")
public void testJoinColumnConfiguredInXml(DomainModelScope scope) {
final PersistentClass pc = scope.getDomainModel().getEntityBinding( Son.class.getName() );
Table table = pc.getJoins().get( 0 ).getTable();
Iterator<Column> columnItr = table.getColumns().iterator();
boolean fooFound = false;
boolean barFound = false;
while ( columnItr.hasNext() ) {
Column column = columnItr.next();
if ( column.getName().equals( "foo" ) ) {
fooFound = true;
}
if ( column.getName().equals( "bar" ) ) {
barFound = true;
}
}
if ( !(fooFound && barFound) ) {
fail( "The mapping defines join columns which could not be found in the metadata." );
}
}
}
| OneToOneInXmlTests |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/ArgumentCaptor.java | {
"start": 2591,
"end": 5988
} | class ____<T> {
private final CapturingMatcher<T> capturingMatcher;
private final Class<? extends T> clazz;
private ArgumentCaptor(Class<? extends T> clazz) {
this.clazz = clazz;
this.capturingMatcher = new CapturingMatcher<>(clazz);
}
/**
* Use it to capture the argument. This method <b>must be used inside of verification</b>.
* <p>
* Internally, this method registers a special implementation of an {@link ArgumentMatcher}.
* This argument matcher stores the argument value so that you can use it later to perform assertions.
* <p>
* See examples in javadoc for {@link ArgumentCaptor} class.
*
* @return null or default values
*/
public T capture() {
T ignored = Mockito.argThat(capturingMatcher);
return defaultValue(clazz);
}
/**
* Returns the captured value of the argument. When capturing varargs use {@link #getAllValues()}.
* <p>
* If verified method was called multiple times then this method returns the latest captured value.
* <p>
* See examples in javadoc for {@link ArgumentCaptor} class.
*
* @return captured argument value
*/
public T getValue() {
return this.capturingMatcher.getLastValue();
}
/**
* Returns all captured values. Use it when capturing varargs or when the verified method was called multiple times.
* When varargs method was called multiple times, this method returns merged list of all values from all invocations.
* <p>
* Example:
* <pre class="code"><code class="java">
* mock.doSomething(new Person("John");
* mock.doSomething(new Person("Jane");
*
* ArgumentCaptor<Person> peopleCaptor = ArgumentCaptor.forClass(Person.class);
* verify(mock, times(2)).doSomething(peopleCaptor.capture());
*
* List<Person> capturedPeople = peopleCaptor.getAllValues();
* assertEquals("John", capturedPeople.get(0).getName());
* assertEquals("Jane", capturedPeople.get(1).getName());
* </pre>
*
* Example of capturing varargs:
* <pre class="code"><code class="java">
* mock.countPeople(new Person("John"), new Person("Jane"); //vararg method
*
* ArgumentCaptor<Person> peopleCaptor = ArgumentCaptor.forClass(Person.class);
*
* verify(mock).countPeople(peopleCaptor.capture());
*
* List expected = asList(new Person("John"), new Person("Jane"));
* assertEquals(expected, peopleCaptor.getAllValues());
* </code></pre>
* See more examples in javadoc for {@link ArgumentCaptor} class.
*
* @return captured argument value
*/
public List<T> getAllValues() {
return this.capturingMatcher.getAllValues();
}
/**
* Build a new <code>ArgumentCaptor</code>.
* <p>
* An <code>ArgumentCaptor</code> will perform type checks (since Mockito 5.0.0).
*
* @param clazz Type matching the parameter to be captured.
* @param <S> Type of clazz
* @param <U> Type of object captured by the newly built ArgumentCaptor
* @return A new ArgumentCaptor
*/
public static <U, S extends U> ArgumentCaptor<U> forClass(Class<S> clazz) {
return new ArgumentCaptor<>(clazz);
}
/**
* Build a new <code>ArgumentCaptor</code> by inferring the | ArgumentCaptor |
java | apache__maven | api/maven-api-core/src/main/java/org/apache/maven/api/services/PathMatcherFactory.java | {
"start": 7004,
"end": 7643
} | interface ____ the
* given patterns match all files.
*
* @return path matcher that unconditionally returns {@code true} for all files
*/
@Nonnull
PathMatcher includesAll();
/**
* {@return whether the given matcher includes all files}.
* This method may conservatively returns {@code false} if case of doubt.
* A return value of {@code true} means that the pattern is certain to match all files.
*
* @param matcher the matcher to test
*/
default boolean isIncludesAll(@Nonnull PathMatcher matcher) {
return Objects.requireNonNull(matcher) == includesAll();
}
}
| when |
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 388917,
"end": 392577
} | class ____ extends YamlDeserializerBase<FlatpackDataFormat> {
public FlatpackDataFormatDeserializer() {
super(FlatpackDataFormat.class);
}
@Override
protected FlatpackDataFormat newInstance() {
return new FlatpackDataFormat();
}
@Override
protected boolean setProperty(FlatpackDataFormat target, String propertyKey,
String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "allowShortLines": {
String val = asText(node);
target.setAllowShortLines(val);
break;
}
case "definition": {
String val = asText(node);
target.setDefinition(val);
break;
}
case "delimiter": {
String val = asText(node);
target.setDelimiter(val);
break;
}
case "fixed": {
String val = asText(node);
target.setFixed(val);
break;
}
case "id": {
String val = asText(node);
target.setId(val);
break;
}
case "ignoreExtraColumns": {
String val = asText(node);
target.setIgnoreExtraColumns(val);
break;
}
case "ignoreFirstRecord": {
String val = asText(node);
target.setIgnoreFirstRecord(val);
break;
}
case "parserFactory": {
String val = asText(node);
target.setParserFactory(val);
break;
}
case "textQualifier": {
String val = asText(node);
target.setTextQualifier(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "fory",
types = org.apache.camel.model.dataformat.ForyDataFormat.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "Fory",
description = "Serialize and deserialize messages using Apache Fory",
deprecated = false,
properties = {
@YamlProperty(name = "allowAutoWiredFory", type = "boolean", defaultValue = "true", description = "Whether to auto-discover Fory from the registry", displayName = "Allow Auto Wired Fory"),
@YamlProperty(name = "id", type = "string", description = "The id of this node", displayName = "Id"),
@YamlProperty(name = "requireClassRegistration", type = "boolean", defaultValue = "true", description = "Whether to require register classes", displayName = "Require Class Registration"),
@YamlProperty(name = "threadSafe", type = "boolean", defaultValue = "true", description = "Whether to use the threadsafe Fory", displayName = "Thread Safe"),
@YamlProperty(name = "unmarshalType", type = "string", description = "Class of the java type to use when unmarshalling", displayName = "Unmarshal Type")
}
)
public static | FlatpackDataFormatDeserializer |
java | apache__camel | components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/internal/processor/AbstractSalesforceProcessor.java | {
"start": 9369,
"end": 9786
} | class ____ whichever parameter was non-null
*/
protected Class<?> getSObjectClass(String sObjectName, String className) throws SalesforceException {
Class<?> sObjectClass = null;
if (sObjectName != null) {
sObjectClass = classMap.get(sObjectName);
if (sObjectClass == null) {
throw new SalesforceException(
String.format("SObject | by |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldNotContainSequenceOfCharSequence_Test.java | {
"start": 1432,
"end": 2955
} | class ____ {
@Test
void should_create_error_message() {
// GIVEN
var factory = shouldNotContainSequence("Yoda", array("Yo", "da"), 0);
// WHEN
String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting actual:%n" +
" \"Yoda\"%n" +
"to not contain sequence:%n" +
" [\"Yo\", \"da\"]%n" +
"but was found at index 0%n"));
}
@Test
void should_create_error_message_with_custom_comparison_strategy() {
// GIVEN
var factory = shouldNotContainSequence("Yoda", array("Yo", "da"), 0,
new ComparatorBasedComparisonStrategy(CaseInsensitiveStringComparator.INSTANCE));
// WHEN
String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting actual:%n" +
" \"Yoda\"%n" +
"to not contain sequence:%n" +
" [\"Yo\", \"da\"]%n" +
"but was found at index 0%n" +
"when comparing values using CaseInsensitiveStringComparator"));
}
}
| ShouldNotContainSequenceOfCharSequence_Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/type/SQLServerNVarCharTypeTest.java | {
"start": 1126,
"end": 1728
} | class ____ {
private SchemaExport schemaExport;
@BeforeEach
public void setUp() {
schemaExport = new SchemaExport().setHaltOnError( true ).setFormat( false );
}
@AfterEach
public void tearDown(DomainModelScope modelScope) {
schemaExport.drop( EnumSet.of( TargetType.DATABASE ), modelScope.getDomainModel() );
}
@Test
public void testSchemaIsCreatedWithoutExceptions(DomainModelScope modelScope) {
schemaExport.createOnly( EnumSet.of( TargetType.DATABASE ), modelScope.getDomainModel() );
}
@Entity(name = "MyEntity")
@Table(name = "MY_ENTITY")
public static | SQLServerNVarCharTypeTest |
java | apache__flink | flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/hadoop/mapred/HadoopInputFormatTest.java | {
"start": 9193,
"end": 9859
} | class ____ implements RecordReader<String, Long> {
@Override
public float getProgress() throws IOException {
return 0;
}
@Override
public boolean next(String s, Long aLong) throws IOException {
return false;
}
@Override
public String createKey() {
return null;
}
@Override
public Long createValue() {
return null;
}
@Override
public long getPos() throws IOException {
return 0;
}
@Override
public void close() throws IOException {}
}
private | DummyRecordReader |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/Navigate.java | {
"start": 1054,
"end": 1695
} | interface ____<T> {
/**
* Next group of outputs
* <p/>
* Important only invoke this once, as this method do not carry state, and is not intended to be used in a while
* loop, but used by a if statement instead.
*
* @return next group or <tt>null</tt> if no more outputs
*/
List<T> next();
/**
* Are there more outputs?
* <p/>
* Important only invoke this once, as this method do not carry state, and is not intended to be used in a while
* loop, but used by a if statement instead.
*
* @return <tt>true</tt> if more outputs
*/
boolean hasNext();
}
| Navigate |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/UploadContentProviders.java | {
"start": 7203,
"end": 11752
} | class ____<T extends InputStream>
implements ContentStreamProvider, Closeable {
/**
* Size of the data.
*/
private final int size;
/**
* Probe to check if the stream is open.
* Invoked in {@link #checkOpen()}, which is itself
* invoked in {@link #newStream()}.
*/
private final Supplier<Boolean> isOpen;
/**
* How many times has a stream been created?
*/
private int streamCreationCount;
/**
* Current stream. Null if not opened yet.
* When {@link #newStream()} is called, this is set to the new value,
* Note: when the input stream itself is closed, this reference is not updated.
* Therefore this field not being null does not imply that the stream is open.
*/
private T currentStream;
/**
* When did this upload start?
* Use in error messages.
*/
private final LocalDateTime startTime;
/**
* Constructor.
* @param size size of the data. Must be non-negative.
*/
protected BaseContentProvider(int size) {
this(size, null);
}
/**
* Constructor.
* @param size size of the data. Must be non-negative.
* @param isOpen optional predicate to check if the stream is open.
*/
protected BaseContentProvider(int size, @Nullable Supplier<Boolean> isOpen) {
checkArgument(size >= 0, "size is negative: %s", size);
this.size = size;
this.isOpen = isOpen;
this.startTime = LocalDateTime.now();
}
/**
* Check if the stream is open.
* If the stream is not open, raise an exception
* @throws IllegalStateException if the stream is not open.
*/
private void checkOpen() {
checkState(isOpen == null || isOpen.get(), "Stream is closed: %s", this);
}
/**
* Close the current stream.
*/
@Override
public void close() {
cleanupWithLogger(LOG, getCurrentStream());
setCurrentStream(null);
}
/**
* Create a new stream.
* <p>
* Calls {@link #close()} to ensure that any existing stream is closed,
* then {@link #checkOpen()} to verify that the data source is still open.
* Logs if this is a subsequent event as it implies a failure of the first attempt.
* @return the new stream
*/
@Override
public final InputStream newStream() {
close();
checkOpen();
streamCreationCount++;
if (streamCreationCount == 2) {
// the stream has been recreated for the first time.
// notify only once for this stream, so as not to flood
// the logs.
// originally logged at info; logs at debug because HADOOP-19516
// means that this message is very common with S3 Express stores.
LOG.debug("Stream recreated: {}", this);
}
return setCurrentStream(createNewStream());
}
/**
* Override point for subclasses to create their new streams.
* @return a stream
*/
protected abstract T createNewStream();
/**
* How many times has a stream been created?
* @return stream creation count
*/
public int getStreamCreationCount() {
return streamCreationCount;
}
/**
* Size as set by constructor parameter.
* @return size of the data
*/
public int getSize() {
return size;
}
/**
* When did this upload start?
* @return start time
*/
public LocalDateTime getStartTime() {
return startTime;
}
/**
* Current stream.
* When {@link #newStream()} is called, this is set to the new value,
* after closing the previous one.
* <p>
* Why? The AWS SDK implementations do this, so there
* is an implication that it is needed to avoid keeping streams
* open on retries.
* @return the current stream, or null if none is open.
*/
protected T getCurrentStream() {
return currentStream;
}
/**
* Set the current stream.
* @param stream the new stream
* @return the current stream.
*/
protected T setCurrentStream(T stream) {
this.currentStream = stream;
return stream;
}
@Override
public String toString() {
return "BaseContentProvider{" +
"size=" + size +
", initiated at " + startTime +
", streamCreationCount=" + streamCreationCount +
", currentStream=" + currentStream +
'}';
}
}
/**
* Content provider for a file with an offset.
*/
private static final | BaseContentProvider |
java | google__guice | core/src/com/google/inject/internal/aop/FastClass.java | {
"start": 2625,
"end": 2769
} | class ____ therefore acts like a bound invoker
* to the appropriate constructor or method of the host class.
*
* <p>A handle to the fast- | instance |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/DefaultObjectDeserializerTest4.java | {
"start": 267,
"end": 740
} | class ____ extends TestCase {
public void test_0() throws Exception {
DefaultJSONParser parser = new DefaultJSONParser("{\"id\":3, \"name\":\"xx\"}", ParserConfig.getGlobalInstance());
Entity entity = new Entity();
parser.parseObject(entity);
}
public void test_1() throws Exception {
JSON.parseObject("{\"id\":3, \"name\":\"xx\"}", Entity.class, 0, Feature.IgnoreNotMatch);
}
public static | DefaultObjectDeserializerTest4 |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java | {
"start": 89673,
"end": 90820
} | class ____ extends InternalTerminationTransition {
public InternalRebootTransition(){
super(JobStateInternal.REBOOT, JobStateInternal.ERROR.toString());
}
}
@Override
public Configuration loadConfFile() throws IOException {
Path confPath = getConfFile();
FileContext fc = FileContext.getFileContext(confPath.toUri(), conf);
Configuration jobConf = new Configuration(false);
jobConf.addResource(fc.open(confPath), confPath.toString());
return jobConf;
}
public float getMaxAllowedFetchFailuresFraction() {
return maxAllowedFetchFailuresFraction;
}
public int getMaxFetchFailuresNotifications() {
return maxFetchFailuresNotifications;
}
@Override
public void setJobPriority(Priority priority) {
this.jobPriority = priority;
}
@Override
public int getFailedMaps() {
return failedMapTaskCount;
}
@Override
public int getFailedReduces() {
return failedReduceTaskCount;
}
@Override
public int getKilledMaps() {
return killedMapTaskCount;
}
@Override
public int getKilledReduces() {
return killedReduceTaskCount;
}
}
| InternalRebootTransition |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java | {
"start": 1763,
"end": 1904
} | class ____ be used interchangeably with DatanodeInfo
return super.equals(o);
}
@Override
public int hashCode() {
// allows this | to |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/bugs/ConcurrentModificationExceptionOnMultiThreadedVerificationTest.java | {
"start": 753,
"end": 1899
} | class ____ {
int nThreads = 1;
static final int TIMES = 100;
static final int INTERVAL_MILLIS = 10;
ITarget target = Mockito.mock(ITarget.class);
ExecutorService fixedThreadPool;
@Before
public void setUp() {
target = Mockito.mock(ITarget.class);
fixedThreadPool = Executors.newFixedThreadPool(nThreads);
}
@Test
public void shouldSuccessfullyVerifyConcurrentInvocationsWithTimeout() throws Exception {
int potentialOverhead =
1000; // Leave 1000ms extra before timing out as leeway for test overheads
int expectedMaxTestLength = TIMES * INTERVAL_MILLIS + potentialOverhead;
reset(target);
startInvocations();
verify(target, timeout(expectedMaxTestLength).times(TIMES * nThreads)).targetMethod("arg");
verifyNoMoreInteractions(target);
}
private void startInvocations() throws InterruptedException, ExecutionException {
for (int i = 0; i < nThreads; i++) {
fixedThreadPool.submit(new TargetInvoker(i));
}
}
public | ConcurrentModificationExceptionOnMultiThreadedVerificationTest |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/groups/Properties_ofType_Test.java | {
"start": 1015,
"end": 1422
} | class ____ {
@Test
void should_create_a_new_Properties() {
// WHEN
Properties<String> properties = extractProperty("id").ofType(String.class);
// THEN
then(FieldSupport.EXTRACTION.fieldValue("propertyName", String.class, properties)).isEqualTo("id");
then(FieldSupport.EXTRACTION.fieldValue("propertyType", Class.class, properties)).isEqualTo(String.class);
}
}
| Properties_ofType_Test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.