language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/xml/AbstractBeanDefinitionParser.java | {
"start": 2107,
"end": 9660
} | class ____ implements BeanDefinitionParser {
/** Constant for the "id" attribute. */
public static final String ID_ATTRIBUTE = "id";
/** Constant for the "name" attribute. */
public static final String NAME_ATTRIBUTE = "name";
@Override
@SuppressWarnings("NullAway") // Dataflow analysis limitation
public final @Nullable BeanDefinition parse(Element element, ParserContext parserContext) {
AbstractBeanDefinition definition = parseInternal(element, parserContext);
if (definition != null && !parserContext.isNested()) {
try {
String id = resolveId(element, definition, parserContext);
if (!StringUtils.hasText(id)) {
parserContext.getReaderContext().error(
"Id is required for element '" + parserContext.getDelegate().getLocalName(element) +
"' when used as a top-level tag", element);
}
String[] aliases = null;
if (shouldParseNameAsAliases()) {
String name = element.getAttribute(NAME_ATTRIBUTE);
if (StringUtils.hasLength(name)) {
aliases = StringUtils.trimArrayElements(StringUtils.commaDelimitedListToStringArray(name));
}
}
BeanDefinitionHolder holder = new BeanDefinitionHolder(definition, id, aliases);
registerBeanDefinition(holder, parserContext.getRegistry());
if (shouldFireEvents()) {
BeanComponentDefinition componentDefinition = new BeanComponentDefinition(holder);
postProcessComponentDefinition(componentDefinition);
parserContext.registerComponent(componentDefinition);
}
}
catch (BeanDefinitionStoreException ex) {
String msg = ex.getMessage();
parserContext.getReaderContext().error((msg != null ? msg : ex.toString()), element);
return null;
}
}
return definition;
}
/**
* Resolve the ID for the supplied {@link BeanDefinition}.
* <p>When using {@link #shouldGenerateId generation}, a name is generated automatically.
* Otherwise, the ID is extracted from the "id" attribute, potentially with a
* {@link #shouldGenerateIdAsFallback() fallback} to a generated id.
* @param element the element that the bean definition has been built from
* @param definition the bean definition to be registered
* @param parserContext the object encapsulating the current state of the parsing process;
* provides access to a {@link org.springframework.beans.factory.support.BeanDefinitionRegistry}
* @return the resolved id
* @throws BeanDefinitionStoreException if no unique name could be generated
* for the given bean definition
*/
protected String resolveId(Element element, AbstractBeanDefinition definition, ParserContext parserContext)
throws BeanDefinitionStoreException {
if (shouldGenerateId()) {
return parserContext.getReaderContext().generateBeanName(definition);
}
else {
String id = element.getAttribute(ID_ATTRIBUTE);
if (!StringUtils.hasText(id) && shouldGenerateIdAsFallback()) {
id = parserContext.getReaderContext().generateBeanName(definition);
}
return id;
}
}
/**
* Register the supplied {@link BeanDefinitionHolder bean} with the supplied
* {@link BeanDefinitionRegistry registry}.
* <p>Subclasses can override this method to control whether the supplied
* {@link BeanDefinitionHolder bean} is actually even registered, or to
* register even more beans.
* <p>The default implementation registers the supplied {@link BeanDefinitionHolder bean}
* with the supplied {@link BeanDefinitionRegistry registry} only if the {@code isNested}
* parameter is {@code false}, because one typically does not want inner beans
* to be registered as top level beans.
* @param definition the bean definition to be registered
* @param registry the registry that the bean is to be registered with
* @see BeanDefinitionReaderUtils#registerBeanDefinition(BeanDefinitionHolder, BeanDefinitionRegistry)
*/
protected void registerBeanDefinition(BeanDefinitionHolder definition, BeanDefinitionRegistry registry) {
BeanDefinitionReaderUtils.registerBeanDefinition(definition, registry);
}
/**
* Central template method to actually parse the supplied {@link Element}
* into one or more {@link BeanDefinition BeanDefinitions}.
* @param element the element that is to be parsed into one or more {@link BeanDefinition BeanDefinitions}
* @param parserContext the object encapsulating the current state of the parsing process;
* provides access to a {@link org.springframework.beans.factory.support.BeanDefinitionRegistry}
* @return the primary {@link BeanDefinition} resulting from the parsing of the supplied {@link Element}
* @see #parse(org.w3c.dom.Element, ParserContext)
* @see #postProcessComponentDefinition(org.springframework.beans.factory.parsing.BeanComponentDefinition)
*/
protected abstract @Nullable AbstractBeanDefinition parseInternal(Element element, ParserContext parserContext);
/**
* Should an ID be generated instead of read from the passed in {@link Element}?
* <p>Disabled by default; subclasses can override this to enable ID generation.
* Note that this flag is about <i>always</i> generating an ID; the parser
* won't even check for an "id" attribute in this case.
* @return whether the parser should always generate an id
*/
protected boolean shouldGenerateId() {
return false;
}
/**
* Should an ID be generated instead if the passed in {@link Element} does not
* specify an "id" attribute explicitly?
* <p>Disabled by default; subclasses can override this to enable ID generation
* as fallback: The parser will first check for an "id" attribute in this case,
* only falling back to a generated ID if no value was specified.
* @return whether the parser should generate an id if no id was specified
*/
protected boolean shouldGenerateIdAsFallback() {
return false;
}
/**
* Determine whether the element's "name" attribute should get parsed as
* bean definition aliases, i.e. alternative bean definition names.
* <p>The default implementation returns {@code true}.
* @return whether the parser should evaluate the "name" attribute as aliases
* @since 4.1.5
*/
protected boolean shouldParseNameAsAliases() {
return true;
}
/**
* Determine whether this parser is supposed to fire a
* {@link org.springframework.beans.factory.parsing.BeanComponentDefinition}
* event after parsing the bean definition.
* <p>This implementation returns {@code true} by default; that is,
* an event will be fired when a bean definition has been completely parsed.
* Override this to return {@code false} in order to suppress the event.
* @return {@code true} in order to fire a component registration event
* after parsing the bean definition; {@code false} to suppress the event
* @see #postProcessComponentDefinition
* @see org.springframework.beans.factory.parsing.ReaderContext#fireComponentRegistered
*/
protected boolean shouldFireEvents() {
return true;
}
/**
* Hook method called after the primary parsing of a
* {@link BeanComponentDefinition} but before the
* {@link BeanComponentDefinition} has been registered with a
* {@link org.springframework.beans.factory.support.BeanDefinitionRegistry}.
* <p>Derived classes can override this method to supply any custom logic that
* is to be executed after all the parsing is finished.
* <p>The default implementation is a no-op.
* @param componentDefinition the {@link BeanComponentDefinition} that is to be processed
*/
protected void postProcessComponentDefinition(BeanComponentDefinition componentDefinition) {
}
}
| AbstractBeanDefinitionParser |
java | resilience4j__resilience4j | resilience4j-bulkhead/src/main/java/io/github/resilience4j/bulkhead/Bulkhead.java | {
"start": 17926,
"end": 18342
} | interface ____ extends io.github.resilience4j.core.EventPublisher<BulkheadEvent> {
EventPublisher onCallRejected(EventConsumer<BulkheadOnCallRejectedEvent> eventConsumer);
EventPublisher onCallPermitted(EventConsumer<BulkheadOnCallPermittedEvent> eventConsumer);
EventPublisher onCallFinished(EventConsumer<BulkheadOnCallFinishedEvent> eventConsumer);
}
/**
* This | EventPublisher |
java | google__error-prone | core/src/test/java/com/google/errorprone/fixes/SuggestedFixesTest.java | {
"start": 63685,
"end": 64004
} | class ____ {
int singletonList = 1;
Object foo = Collections.<Integer /* foo */>singletonList(singletonList);
Object bar = Collections.<Integer> /* foo */singletonList(singletonList);
Object baz = Collections.<Integer>singletonList(singletonList);
| Test |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java | {
"start": 2577,
"end": 6633
} | class ____ implements
HAServiceProtocol, Closeable, ProtocolTranslator {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final static MonitorHealthRequestProto MONITOR_HEALTH_REQ =
MonitorHealthRequestProto.newBuilder().build();
private final static GetServiceStatusRequestProto GET_SERVICE_STATUS_REQ =
GetServiceStatusRequestProto.newBuilder().build();
private final HAServiceProtocolPB rpcProxy;
public HAServiceProtocolClientSideTranslatorPB(InetSocketAddress addr,
Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
ProtobufRpcEngine2.class);
rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, conf);
}
public HAServiceProtocolClientSideTranslatorPB(
InetSocketAddress addr, Configuration conf,
SocketFactory socketFactory, int timeout) throws IOException {
RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
ProtobufRpcEngine2.class);
rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
RPC.getProtocolVersion(HAServiceProtocolPB.class), addr,
UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
}
@Override
public void monitorHealth() throws IOException {
ipc(() -> rpcProxy.monitorHealth(NULL_CONTROLLER, MONITOR_HEALTH_REQ));
}
@Override
public void transitionToActive(StateChangeRequestInfo reqInfo) throws IOException {
TransitionToActiveRequestProto req =
TransitionToActiveRequestProto.newBuilder()
.setReqInfo(convert(reqInfo)).build();
ipc(() -> rpcProxy.transitionToActive(NULL_CONTROLLER, req));
}
@Override
public void transitionToStandby(StateChangeRequestInfo reqInfo) throws IOException {
TransitionToStandbyRequestProto req =
TransitionToStandbyRequestProto.newBuilder()
.setReqInfo(convert(reqInfo)).build();
ipc(() -> rpcProxy.transitionToStandby(NULL_CONTROLLER, req));
}
@Override
public void transitionToObserver(StateChangeRequestInfo reqInfo)
throws IOException {
TransitionToObserverRequestProto req =
TransitionToObserverRequestProto.newBuilder()
.setReqInfo(convert(reqInfo)).build();
ipc(() -> rpcProxy.transitionToObserver(NULL_CONTROLLER, req));
}
@Override
public HAServiceStatus getServiceStatus() throws IOException {
GetServiceStatusResponseProto status;
status = ipc(() -> rpcProxy.getServiceStatus(NULL_CONTROLLER,
GET_SERVICE_STATUS_REQ));
HAServiceStatus ret = new HAServiceStatus(
convert(status.getState()));
if (status.getReadyToBecomeActive()) {
ret.setReadyToBecomeActive();
} else {
ret.setNotReadyToBecomeActive(status.getNotReadyReason());
}
return ret;
}
private HAServiceState convert(HAServiceStateProto state) {
switch(state) {
case ACTIVE:
return HAServiceState.ACTIVE;
case STANDBY:
return HAServiceState.STANDBY;
case OBSERVER:
return HAServiceState.OBSERVER;
case INITIALIZING:
default:
return HAServiceState.INITIALIZING;
}
}
private HAStateChangeRequestInfoProto convert(StateChangeRequestInfo reqInfo) {
HARequestSource src;
switch (reqInfo.getSource()) {
case REQUEST_BY_USER:
src = HARequestSource.REQUEST_BY_USER;
break;
case REQUEST_BY_USER_FORCED:
src = HARequestSource.REQUEST_BY_USER_FORCED;
break;
case REQUEST_BY_ZKFC:
src = HARequestSource.REQUEST_BY_ZKFC;
break;
default:
throw new IllegalArgumentException("Bad source: " + reqInfo.getSource());
}
return HAStateChangeRequestInfoProto.newBuilder()
.setReqSource(src)
.build();
}
@Override
public void close() {
RPC.stopProxy(rpcProxy);
}
@Override
public Object getUnderlyingProxyObject() {
return rpcProxy;
}
}
| HAServiceProtocolClientSideTranslatorPB |
java | apache__dubbo | dubbo-plugin/dubbo-filter-validation/src/main/java/org/apache/dubbo/validation/Validation.java | {
"start": 1215,
"end": 1467
} | interface ____ {
/**
* Return the instance of {@link Validator} for a given url.
* @param url Invocation url
* @return Instance of {@link Validator}
*/
@Adaptive(VALIDATION_KEY)
Validator getValidator(URL url);
}
| Validation |
java | grpc__grpc-java | core/src/testFixtures/java/io/grpc/internal/AbstractTransportTest.java | {
"start": 3347,
"end": 102197
} | class ____ {
/**
* Use a small flow control to help detect flow control bugs. Don't use 64KiB to test
* SETTINGS/WINDOW_UPDATE exchange.
*/
public static final int TEST_FLOW_CONTROL_WINDOW = 65 * 1024;
protected static final int TIMEOUT_MS = 5000;
protected static final String GRPC_EXPERIMENTAL_SUPPORT_TRACING_MESSAGE_SIZES =
"GRPC_EXPERIMENTAL_SUPPORT_TRACING_MESSAGE_SIZES";
private static final Attributes.Key<String> ADDITIONAL_TRANSPORT_ATTR_KEY =
Attributes.Key.create("additional-attr");
private static final Attributes.Key<String> EAG_ATTR_KEY =
Attributes.Key.create("eag-attr");
private static final Attributes EAG_ATTRS =
Attributes.newBuilder().set(EAG_ATTR_KEY, "value").build();
protected final TransportTracer.Factory fakeClockTransportTracer = new TransportTracer.Factory(
new TimeProvider() {
@Override
public long currentTimeNanos() {
return fakeCurrentTimeNanos();
}
});
/**
* Returns a new server that when started will be able to be connected to from the client. Each
* returned instance should be new and yet be accessible by new client transports.
*/
protected abstract InternalServer newServer(
List<ServerStreamTracer.Factory> streamTracerFactories);
/**
* Builds a new server that is listening on the same port as the given server instance does.
*/
protected abstract InternalServer newServer(
int port, List<ServerStreamTracer.Factory> streamTracerFactories);
/**
* Returns a new transport that when started will be able to connect to {@code server}.
*/
protected abstract ManagedClientTransport newClientTransport(InternalServer server);
/**
* Returns the authority string used by a client to connect to {@code server}.
*/
protected abstract String testAuthority(InternalServer server);
protected final Attributes eagAttrs() {
return EAG_ATTRS;
}
protected final ChannelLogger transportLogger() {
return new ChannelLogger() {
@Override
public void log(ChannelLogLevel level, String message) {}
@Override
public void log(ChannelLogLevel level, String messageFormat, Object... args) {}
};
}
/**
* When non-null, will be shut down during tearDown(). However, it _must_ have been started with
* {@code serverListener}, otherwise tearDown() can't wait for shutdown which can put following
* tests in an indeterminate state.
*/
protected InternalServer server;
protected ServerTransport serverTransport;
protected ManagedClientTransport client;
protected MethodDescriptor<String, String> methodDescriptor =
MethodDescriptor.<String, String>newBuilder()
.setType(MethodDescriptor.MethodType.UNKNOWN)
.setFullMethodName("service/method")
.setRequestMarshaller(StringMarshaller.INSTANCE)
.setResponseMarshaller(StringMarshaller.INSTANCE)
.build();
private final CallOptions callOptions = CallOptions.DEFAULT;
private Metadata.Key<String> asciiKey = Metadata.Key.of(
"ascii-key", Metadata.ASCII_STRING_MARSHALLER);
private Metadata.Key<String> binaryKey = Metadata.Key.of(
"key-bin", StringBinaryMarshaller.INSTANCE);
private final Metadata.Key<String> tracerHeaderKey = Metadata.Key.of(
"tracer-key", Metadata.ASCII_STRING_MARSHALLER);
private final String tracerKeyValue = "tracer-key-value";
protected ManagedClientTransport.Listener mockClientTransportListener
= mock(ManagedClientTransport.Listener.class);
protected MockServerListener serverListener = new MockServerListener();
private ArgumentCaptor<Status> statusCaptor = ArgumentCaptor.forClass(Status.class);
protected final TestClientStreamTracer clientStreamTracer1 = new TestHeaderClientStreamTracer();
private final TestClientStreamTracer clientStreamTracer2 = new TestHeaderClientStreamTracer();
protected final ClientStreamTracer[] tracers = new ClientStreamTracer[] {
clientStreamTracer1, clientStreamTracer2
};
private final ClientStreamTracer[] noopTracers = new ClientStreamTracer[] {
new ClientStreamTracer() {}
};
protected final TestServerStreamTracer serverStreamTracer1 = new TestServerStreamTracer();
private final TestServerStreamTracer serverStreamTracer2 = new TestServerStreamTracer();
protected final ServerStreamTracer.Factory serverStreamTracerFactory = mock(
ServerStreamTracer.Factory.class,
delegatesTo(new ServerStreamTracer.Factory() {
final ArrayDeque<TestServerStreamTracer> tracers =
new ArrayDeque<>(Arrays.asList(serverStreamTracer1, serverStreamTracer2));
@Override
public ServerStreamTracer newServerStreamTracer(String fullMethodName, Metadata headers) {
TestServerStreamTracer tracer = tracers.poll();
if (tracer != null) {
return tracer;
}
return new TestServerStreamTracer();
}
}));
@Before
public void setUp() {
server = newServer(Arrays.asList(serverStreamTracerFactory));
when(mockClientTransportListener.filterTransport(any())).thenAnswer(i -> i.getArguments()[0]);
}
@After
public void tearDown() throws InterruptedException {
if (client != null) {
client.shutdownNow(Status.UNKNOWN.withDescription("teardown"));
}
if (serverTransport != null) {
serverTransport.shutdownNow(Status.UNKNOWN.withDescription("teardown"));
}
if (server != null) {
server.shutdown();
assertTrue(serverListener.waitForShutdown(TIMEOUT_MS, TimeUnit.MILLISECONDS));
}
}
/**
* Moves the clock forward, for tests that require moving the clock forward. It is the transport
* subclass's responsibility to implement this method.
*/
protected void advanceClock(long offset, TimeUnit unit) {
throw new UnsupportedOperationException();
}
/**
* Returns true if env var is set.
*/
protected static boolean isEnabledSupportTracingMessageSizes() {
return GrpcUtil.getFlag(GRPC_EXPERIMENTAL_SUPPORT_TRACING_MESSAGE_SIZES, false);
}
/**
* Returns the current time, for tests that rely on the clock.
*/
protected long fakeCurrentTimeNanos() {
throw new UnsupportedOperationException();
}
// TODO(ejona):
// multiple streams on same transport
// multiple client transports to same server
// halfClose to trigger flush (client and server)
// flow control pushes back (client and server)
// flow control provides precisely number of messages requested (client and server)
// onReady called when buffer drained (on server and client)
// test no start reentrancy (esp. during failure) (transport and call)
// multiple requests/responses (verifying contents received)
// server transport shutdown triggers client shutdown (via GOAWAY)
// queued message InputStreams are closed on stream cancel
// (and maybe exceptions handled)
/**
* Test for issue https://github.com/grpc/grpc-java/issues/1682 .
*/
@Test
public void frameAfterRstStreamShouldNotBreakClientChannel() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
startTransport(client, mockClientTransportListener);
MockServerTransportListener serverTransportListener
= serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
serverTransport = serverTransportListener.transport;
// Try to create a sequence of frames so that the client receives a HEADERS or DATA frame
// after having sent a RST_STREAM to the server. Previously, this would have broken the
// Netty channel.
ClientStream stream = client.newStream(
methodDescriptor, new Metadata(), callOptions, noopTracers);
ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
stream.start(clientStreamListener);
StreamCreation serverStreamCreation
= serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
stream.flush();
stream.writeMessage(methodDescriptor.streamRequest("foo"));
stream.flush();
stream.cancel(Status.CANCELLED);
stream.flush();
serverStreamCreation.stream.writeHeaders(new Metadata(), true);
serverStreamCreation.stream.flush();
serverStreamCreation.stream.writeMessage(methodDescriptor.streamResponse("bar"));
serverStreamCreation.stream.flush();
assertEquals(
Status.CANCELLED, clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
ClientStreamListener mockClientStreamListener2 = mock(ClientStreamListener.class);
// Test that the channel is still usable i.e. we can receive headers from the server on a
// new stream.
stream = client.newStream(
methodDescriptor, new Metadata(), callOptions, noopTracers);
stream.start(mockClientStreamListener2);
serverStreamCreation
= serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
serverStreamCreation.stream.writeHeaders(new Metadata(), true);
serverStreamCreation.stream.flush();
verify(mockClientStreamListener2, timeout(TIMEOUT_MS)).headersRead(any(Metadata.class));
}
@Test
public void serverNotListening() throws Exception {
// Start server to just acquire a port.
server.start(serverListener);
client = newClientTransport(server);
server.shutdown();
assertTrue(serverListener.waitForShutdown(TIMEOUT_MS, TimeUnit.MILLISECONDS));
server = null;
InOrder inOrder = inOrder(mockClientTransportListener);
runIfNotNull(client.start(mockClientTransportListener));
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportTerminated();
ArgumentCaptor<Status> statusCaptor = ArgumentCaptor.forClass(Status.class);
inOrder.verify(mockClientTransportListener).transportShutdown(statusCaptor.capture());
assertCodeEquals(Status.UNAVAILABLE, statusCaptor.getValue());
inOrder.verify(mockClientTransportListener).transportTerminated();
verify(mockClientTransportListener, never()).transportReady();
verify(mockClientTransportListener, never()).transportInUse(anyBoolean());
}
@Test
public void clientStartStop() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
InOrder inOrder = inOrder(mockClientTransportListener);
startTransport(client, mockClientTransportListener);
Status shutdownReason = Status.UNAVAILABLE.withDescription("shutdown called");
client.shutdown(shutdownReason);
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportTerminated();
inOrder.verify(mockClientTransportListener).transportShutdown(same(shutdownReason));
inOrder.verify(mockClientTransportListener).transportTerminated();
verify(mockClientTransportListener, never()).transportInUse(anyBoolean());
}
@Test
public void clientStartAndStopOnceConnected() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
InOrder inOrder = inOrder(mockClientTransportListener);
startTransport(client, mockClientTransportListener);
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportReady();
MockServerTransportListener serverTransportListener
= serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
client.shutdown(Status.UNAVAILABLE);
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportTerminated();
inOrder.verify(mockClientTransportListener).transportShutdown(any(Status.class));
inOrder.verify(mockClientTransportListener).transportTerminated();
assertTrue(serverTransportListener.waitForTermination(TIMEOUT_MS, TimeUnit.MILLISECONDS));
server.shutdown();
assertTrue(serverListener.waitForShutdown(TIMEOUT_MS, TimeUnit.MILLISECONDS));
server = null;
verify(mockClientTransportListener, never()).transportInUse(anyBoolean());
}
@Test
public void checkClientAttributes() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
assumeTrue(client instanceof ConnectionClientTransport);
ConnectionClientTransport connectionClient = (ConnectionClientTransport) client;
startTransport(connectionClient, mockClientTransportListener);
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportReady();
assertNotNull("security level should be set in client attributes",
connectionClient.getAttributes().get(GrpcAttributes.ATTR_SECURITY_LEVEL));
}
@Test
public void serverAlreadyListening() throws Exception {
client = null;
server.start(serverListener);
int port = -1;
SocketAddress addr = server.getListenSocketAddress();
if (addr instanceof InetSocketAddress) {
port = ((InetSocketAddress) addr).getPort();
}
InternalServer server2 = newServer(port, Arrays.asList(serverStreamTracerFactory));
assertThrows(IOException.class, () -> server2.start(new MockServerListener()));
}
@Test
public void serverStartInterrupted() throws Exception {
client = null;
// Just get free port
server.start(serverListener);
int port = -1;
SocketAddress addr = server.getListenSocketAddress();
if (addr instanceof InetSocketAddress) {
port = ((InetSocketAddress) addr).getPort();
}
assumeTrue("transport is not using InetSocketAddress", port != -1);
server.shutdown();
assertTrue(serverListener.waitForShutdown(TIMEOUT_MS, TimeUnit.MILLISECONDS));
server = newServer(port, Arrays.asList(serverStreamTracerFactory));
boolean success;
Thread.currentThread().interrupt();
try {
server.start(serverListener = new MockServerListener());
success = true;
} catch (Exception ex) {
success = false;
} finally {
Thread.interrupted(); // clear interruption
}
assumeTrue("apparently start is not impacted by interruption, so nothing to test", !success);
// second time should not throw, as the first time should not have bound to the port
server.start(serverListener);
}
@Test
public void openStreamPreventsTermination() throws Exception {
server.start(serverListener);
int port = -1;
SocketAddress addr = server.getListenSocketAddress();
if (addr instanceof InetSocketAddress) {
port = ((InetSocketAddress) addr).getPort();
}
client = newClientTransport(server);
startTransport(client, mockClientTransportListener);
MockServerTransportListener serverTransportListener
= serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
serverTransport = serverTransportListener.transport;
ClientStream clientStream = client.newStream(
methodDescriptor, new Metadata(), callOptions, tracers);
ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
clientStream.start(clientStreamListener);
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportInUse(true);
StreamCreation serverStreamCreation
= serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
ServerStream serverStream = serverStreamCreation.stream;
ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
client.shutdown(Status.UNAVAILABLE);
client = null;
server.shutdown();
serverTransport.shutdown();
serverTransport = null;
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportShutdown(any(Status.class));
assertTrue(serverListener.waitForShutdown(TIMEOUT_MS, TimeUnit.MILLISECONDS));
// A new server should be able to start listening, since the current server has given up
// resources. There may be cases this is impossible in the future, but for now it is a useful
// property.
serverListener = new MockServerListener();
server = newServer(port, Arrays.asList(serverStreamTracerFactory));
server.start(serverListener);
// Try to "flush" out any listener notifications on client and server. This also ensures that
// the stream still functions.
serverStream.writeHeaders(new Metadata(), true);
clientStream.halfClose();
assertNotNull(clientStreamListener.awaitHeaders(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertTrue(serverStreamListener.awaitHalfClosed(TIMEOUT_MS, TimeUnit.MILLISECONDS));
verify(mockClientTransportListener, never()).transportTerminated();
verify(mockClientTransportListener, never()).transportInUse(false);
assertFalse(serverTransportListener.isTerminated());
clientStream.cancel(Status.CANCELLED);
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportTerminated();
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportInUse(false);
assertTrue(serverTransportListener.waitForTermination(TIMEOUT_MS, TimeUnit.MILLISECONDS));
}
@Test
public void shutdownNowKillsClientStream() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
startTransport(client, mockClientTransportListener);
MockServerTransportListener serverTransportListener
= serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
serverTransport = serverTransportListener.transport;
ClientStream clientStream = client.newStream(
methodDescriptor, new Metadata(), callOptions, tracers);
ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
clientStream.start(clientStreamListener);
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportInUse(true);
StreamCreation serverStreamCreation
= serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
Status status = Status.UNKNOWN.withDescription("test shutdownNow");
client.shutdownNow(status);
client = null;
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportShutdown(any(Status.class));
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportTerminated();
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportInUse(false);
assertTrue(serverTransportListener.waitForTermination(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertTrue(serverTransportListener.isTerminated());
assertEquals(status, clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
Status serverStatus = serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
assertFalse(serverStatus.isOk());
assertTrue(clientStreamTracer1.await(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertNull(clientStreamTracer1.getInboundTrailers());
assertStatusEquals(status, clientStreamTracer1.getStatus());
assertTrue(serverStreamTracer1.await(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertStatusEquals(serverStatus, serverStreamTracer1.getStatus());
}
  /**
   * Verifies that {@code shutdownNow} on the server transport abruptly terminates the transport
   * and its active stream: the client listener sees shutdown, termination, and in-use(false), the
   * server transport listener terminates, and the client stream closes with a non-OK status.
   */
  @Test
  public void shutdownNowKillsServerStream() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    // Open one stream so there is an active server stream for shutdownNow to kill.
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportInUse(true);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
    Status shutdownStatus = Status.UNKNOWN.withDescription("test shutdownNow");
    serverTransport.shutdownNow(shutdownStatus);
    // Cleared here, presumably so shared teardown does not shut the transport down a second
    // time — TODO confirm against the fixture's tearDown.
    serverTransport = null;
    verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportShutdown(any(Status.class));
    verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportTerminated();
    verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportInUse(false);
    assertTrue(serverTransportListener.waitForTermination(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertTrue(serverTransportListener.isTerminated());
    // The client stream must be closed non-OK, with trailers delivered and no inbound trailers
    // recorded by the tracer (the RPC never completed normally).
    Status clientStreamStatus = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertFalse(clientStreamStatus.isOk());
    assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertTrue(clientStreamTracer1.await(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertNull(clientStreamTracer1.getInboundTrailers());
    assertStatusEquals(clientStreamStatus, clientStreamTracer1.getStatus());
    assertTrue(serverStreamTracer1.await(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertStatusEquals(shutdownStatus, serverStreamTracer1.getStatus());
    // Generally will be same status provided to shutdownNow, but InProcessTransport can't
    // differentiate between client and server shutdownNow. The status is not really used on
    // server-side, so we don't care much.
    assertNotNull(serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
  }
@Test
public void ping() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
startTransport(client, mockClientTransportListener);
ClientTransport.PingCallback mockPingCallback = mock(ClientTransport.PingCallback.class);
try {
client.ping(mockPingCallback, MoreExecutors.directExecutor());
} catch (UnsupportedOperationException ex) {
// Transport doesn't support ping, so this neither passes nor fails.
assumeTrue(false);
}
verify(mockPingCallback, timeout(TIMEOUT_MS)).onSuccess(ArgumentMatchers.anyLong());
verify(mockClientTransportListener, never()).transportInUse(anyBoolean());
}
  /**
   * Verifies that a ping still succeeds after {@code shutdown} has been called but while the
   * transport is kept alive by an open stream (i.e. shut down but not yet terminated).
   */
  @Test
  public void ping_duringShutdown() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    // Stream prevents termination
    ClientStream stream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    stream.start(clientStreamListener);
    client.shutdown(Status.UNAVAILABLE);
    verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportShutdown(any(Status.class));
    ClientTransport.PingCallback mockPingCallback = mock(ClientTransport.PingCallback.class);
    try {
      client.ping(mockPingCallback, MoreExecutors.directExecutor());
    } catch (UnsupportedOperationException ex) {
      // Transport doesn't support ping, so this neither passes nor fails.
      assumeTrue(false);
    }
    verify(mockPingCallback, timeout(TIMEOUT_MS)).onSuccess(ArgumentMatchers.anyLong());
    // Release the transport so it can terminate after the test.
    stream.cancel(Status.CANCELLED);
  }
@Test
public void ping_afterTermination() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
startTransport(client, mockClientTransportListener);
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportReady();
Status shutdownReason = Status.UNAVAILABLE.withDescription("shutdown called");
client.shutdown(shutdownReason);
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportTerminated();
ClientTransport.PingCallback mockPingCallback = mock(ClientTransport.PingCallback.class);
try {
client.ping(mockPingCallback, MoreExecutors.directExecutor());
} catch (UnsupportedOperationException ex) {
// Transport doesn't support ping, so this neither passes nor fails.
assumeTrue(false);
}
verify(mockPingCallback, timeout(TIMEOUT_MS)).onFailure(statusCaptor.capture());
Status status = statusCaptor.getValue();
assertSame(shutdownReason, status);
}
  /**
   * Verifies that a stream created after {@code shutdown} (but before termination) is immediately
   * closed with UNAVAILABLE, while a stream that was already open before shutdown keeps working to
   * completion.
   */
  @Test
  public void newStream_duringShutdown() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    // Stream prevents termination
    ClientStream stream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, noopTracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    stream.start(clientStreamListener);
    client.shutdown(Status.UNAVAILABLE);
    verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportShutdown(any(Status.class));
    // A stream created during shutdown must fail fast with UNAVAILABLE and no inbound trailers
    // recorded by its tracer.
    ClientStream stream2 = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener2 = new ClientStreamListenerBase();
    stream2.start(clientStreamListener2);
    Status clientStreamStatus2 =
        clientStreamListener2.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertNotNull(clientStreamListener2.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertCodeEquals(Status.UNAVAILABLE, clientStreamStatus2);
    assertNull(clientStreamTracer2.getInboundTrailers());
    assertSame(clientStreamStatus2, clientStreamTracer2.getStatus());
    // Make sure earlier stream works.
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    // TODO(zdapeng): Increased timeout to 20 seconds to see if flakiness of #2328 persists. Take
    // further action after sufficient observation.
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(20 * TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverStreamCreation.stream.close(Status.OK, new Metadata());
    assertCodeEquals(Status.OK, clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
  }
  /**
   * Verifies that a stream created after the transport has fully terminated is closed with the
   * exact status passed to {@code shutdown}, without ever reaching the server or toggling in-use.
   */
  @Test
  public void newStream_afterTermination() throws Exception {
    // We expect the same general behavior as duringShutdown, but for some transports (e.g., Netty)
    // dealing with afterTermination is harder than duringShutdown.
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportReady();
    Status shutdownReason = Status.UNAVAILABLE.withDescription("shutdown called");
    client.shutdown(shutdownReason);
    verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportTerminated();
    // Presumably lets post-termination processing settle before creating the stream — TODO
    // confirm whether this sleep is still needed.
    Thread.sleep(100);
    ClientStream stream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    stream.start(clientStreamListener);
    // The stream fails with the very same Status instance given to shutdown().
    assertEquals(
        shutdownReason, clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    verify(mockClientTransportListener, never()).transportInUse(anyBoolean());
    assertNull(clientStreamTracer1.getInboundTrailers());
    assertSame(shutdownReason, clientStreamTracer1.getStatus());
    // Assert no interactions
    assertNull(serverStreamTracer1.getServerCallInfo());
  }
  /**
   * Verifies that RPCs owned by the load balancer (marked with
   * {@code CALL_OPTIONS_RPC_OWNED_BY_BALANCER}) are excluded from the transport's in-use signal:
   * only ordinary RPCs drive {@code transportInUse(true/false)}.
   */
  @Test
  public void transportInUse_balancerRpcsNotCounted() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    // stream1 is created by balancer through Subchannel.asChannel(), which is marked by
    // CALL_OPTIONS_RPC_OWNED_BY_BALANCER in CallOptions. It won't be counted for in-use signal.
    ClientStream stream1 = client.newStream(
        methodDescriptor, new Metadata(),
        callOptions.withOption(GrpcUtil.CALL_OPTIONS_RPC_OWNED_BY_BALANCER, Boolean.TRUE),
        noopTracers);
    ClientStreamListenerBase clientStreamListener1 = new ClientStreamListenerBase();
    stream1.start(clientStreamListener1);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    StreamCreation serverStreamCreation1
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    // stream2 is the normal RPC, and will be counted for in-use
    ClientStream stream2 = client.newStream(
        methodDescriptor, new Metadata(), callOptions, noopTracers);
    ClientStreamListenerBase clientStreamListener2 = new ClientStreamListenerBase();
    stream2.start(clientStreamListener2);
    verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportInUse(true);
    StreamCreation serverStreamCreation2
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    stream2.halfClose();
    verify(mockClientTransportListener, never()).transportInUse(false);
    serverStreamCreation2.stream.close(Status.OK, new Metadata());
    // As soon as stream2 is closed, even though stream1 is still open, the transport will report
    // in-use == false.
    verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportInUse(false);
    stream1.halfClose();
    serverStreamCreation1.stream.close(Status.OK, new Metadata());
    // Verify that the callback has been called only once for true and false respectively
    verify(mockClientTransportListener).transportInUse(true);
    verify(mockClientTransportListener).transportInUse(false);
  }
  /**
   * Verifies the in-use signal across normal stream completion: in-use stays true while any
   * stream is open, flips to false only after the last stream closes, and each transition is
   * reported exactly once.
   */
  @Test
  public void transportInUse_normalClose() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    ClientStream stream1 = client.newStream(
        methodDescriptor, new Metadata(), callOptions, noopTracers);
    ClientStreamListenerBase clientStreamListener1 = new ClientStreamListenerBase();
    stream1.start(clientStreamListener1);
    verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportInUse(true);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    StreamCreation serverStreamCreation1
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ClientStream stream2 = client.newStream(
        methodDescriptor, new Metadata(), callOptions, noopTracers);
    ClientStreamListenerBase clientStreamListener2 = new ClientStreamListenerBase();
    stream2.start(clientStreamListener2);
    StreamCreation serverStreamCreation2
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    // Close the first stream completely; the second is still open, so in-use must remain true.
    stream1.halfClose();
    serverStreamCreation1.stream.close(Status.OK, new Metadata());
    stream2.halfClose();
    verify(mockClientTransportListener, never()).transportInUse(false);
    serverStreamCreation2.stream.close(Status.OK, new Metadata());
    verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportInUse(false);
    // Verify that the callback has been called only once for true and false respectively
    verify(mockClientTransportListener).transportInUse(true);
    verify(mockClientTransportListener).transportInUse(false);
  }
@Test
public void transportInUse_clientCancel() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
startTransport(client, mockClientTransportListener);
ClientStream stream1 = client.newStream(
methodDescriptor, new Metadata(), callOptions, noopTracers);
ClientStreamListenerBase clientStreamListener1 = new ClientStreamListenerBase();
stream1.start(clientStreamListener1);
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportInUse(true);
ClientStream stream2 = client.newStream(
methodDescriptor, new Metadata(), callOptions, noopTracers);
ClientStreamListenerBase clientStreamListener2 = new ClientStreamListenerBase();
stream2.start(clientStreamListener2);
stream1.cancel(Status.CANCELLED);
verify(mockClientTransportListener, never()).transportInUse(false);
stream2.cancel(Status.CANCELLED);
verify(mockClientTransportListener, timeout(TIMEOUT_MS)).transportInUse(false);
// Verify that the callback has been called only once for true and false respectively
verify(mockClientTransportListener).transportInUse(true);
verify(mockClientTransportListener).transportInUse(false);
}
  /**
   * End-to-end happy-path test of a single RPC: header propagation (including duplicate ASCII and
   * binary values), one request and one response message, tracer event/size bookkeeping on both
   * sides, transport attributes, and a normal OK close with trailers.
   */
  @Test
  public void basicStream() throws Exception {
    // Server listener that augments the transport-ready attributes with a test-only key, so we
    // can later assert the extra attribute is visible on the server stream.
    serverListener =
        new MockServerListener(
            transport ->
                new MockServerTransportListener(transport) {
                  @Override
                  public Attributes transportReady(Attributes attributes) {
                    return super.transportReady(attributes).toBuilder()
                        .set(ADDITIONAL_TRANSPORT_ATTR_KEY, "additional attribute value")
                        .build();
                  }
                });
    InOrder serverInOrder = inOrder(serverStreamTracerFactory);
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    // This attribute is available right after transport is started
    assertThat(((ConnectionClientTransport) client).getAttributes()
        .get(GrpcAttributes.ATTR_CLIENT_EAG_ATTRS)).isSameInstanceAs(EAG_ATTRS);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    // Client headers include duplicate ASCII values and binary values (with a comma) to exercise
    // metadata encoding; a copy is kept because the transport may consume/modify the original.
    Metadata clientHeaders = new Metadata();
    clientHeaders.put(asciiKey, "client");
    clientHeaders.put(asciiKey, "dupvalue");
    clientHeaders.put(asciiKey, "dupvalue");
    clientHeaders.put(binaryKey, "äbinaryclient");
    clientHeaders.put(binaryKey, "dup,value");
    Metadata clientHeadersCopy = new Metadata();
    clientHeadersCopy.merge(clientHeaders);
    ClientStream clientStream = client.newStream(
        methodDescriptor, clientHeaders, callOptions, tracers);
    assertThat(((TestHeaderClientStreamTracer) clientStreamTracer1).transportAttrs)
        .isSameInstanceAs(((ConnectionClientTransport) client).getAttributes());
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertTrue(clientStreamTracer1.awaitOutboundHeaders(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    // The server must see the method name and all header values, plus the value injected by the
    // tracer into the headers.
    assertEquals(methodDescriptor.getFullMethodName(), serverStreamCreation.method);
    assertEquals(Lists.newArrayList(clientHeadersCopy.getAll(asciiKey)),
        Lists.newArrayList(serverStreamCreation.headers.getAll(asciiKey)));
    assertEquals(Lists.newArrayList(clientHeadersCopy.getAll(binaryKey)),
        Lists.newArrayList(serverStreamCreation.headers.getAll(binaryKey)));
    assertEquals(tracerKeyValue, serverStreamCreation.headers.get(tracerHeaderKey));
    ServerStream serverStream = serverStreamCreation.stream;
    ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
    serverInOrder.verify(serverStreamTracerFactory).newServerStreamTracer(
        eq(methodDescriptor.getFullMethodName()), any(Metadata.class));
    // The attribute added in transportReady() above must be visible on the server stream, along
    // with the standard remote/local address attributes.
    assertEquals("additional attribute value",
        serverStream.getAttributes().get(ADDITIONAL_TRANSPORT_ATTR_KEY));
    assertNotNull(serverStream.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR));
    assertNotNull(serverStream.getAttributes().get(Grpc.TRANSPORT_ATTR_LOCAL_ADDR));
    // This attribute is still available when the transport is connected
    assertThat(((ConnectionClientTransport) client).getAttributes()
        .get(GrpcAttributes.ATTR_CLIENT_EAG_ATTRS)).isSameInstanceAs(EAG_ATTRS);
    // --- Request message: client -> server, with tracer event and size checks.
    serverStream.request(1);
    assertTrue(clientStreamListener.awaitOnReadyAndDrain(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertTrue(clientStream.isReady());
    clientStream.writeMessage(methodDescriptor.streamRequest("Hello!"));
    assertThat(clientStreamTracer1.nextOutboundEvent()).isEqualTo("outboundMessage(0)");
    clientStream.flush();
    InputStream message = serverStreamListener.messageQueue.poll(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertEquals("Hello!", methodDescriptor.parseRequest(message));
    message.close();
    assertThat(clientStreamTracer1.nextOutboundEvent())
        .matches("outboundMessageSent\\(0, -?[0-9]+, -?[0-9]+\\)");
    if (isEnabledSupportTracingMessageSizes()) {
      assertThat(clientStreamTracer1.getOutboundWireSize()).isGreaterThan(0L);
      assertThat(clientStreamTracer1.getOutboundUncompressedSize()).isGreaterThan(0L);
    }
    assertThat(serverStreamTracer1.nextInboundEvent()).isEqualTo("inboundMessage(0)");
    assertNull("no additional message expected", serverStreamListener.messageQueue.poll());
    clientStream.halfClose();
    assertTrue(serverStreamListener.awaitHalfClosed(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    if (isEnabledSupportTracingMessageSizes()) {
      assertThat(serverStreamTracer1.getInboundWireSize()).isGreaterThan(0L);
      assertThat(serverStreamTracer1.getInboundUncompressedSize()).isGreaterThan(0L);
    }
    assertThat(serverStreamTracer1.nextInboundEvent())
        .matches("inboundMessageRead\\(0, -?[0-9]+, -?[0-9]+\\)");
    // --- Server response headers: again with duplicate ASCII and binary values.
    Metadata serverHeaders = new Metadata();
    serverHeaders.put(asciiKey, "server");
    serverHeaders.put(asciiKey, "dupvalue");
    serverHeaders.put(asciiKey, "dupvalue");
    serverHeaders.put(binaryKey, "äbinaryserver");
    serverHeaders.put(binaryKey, "dup,value");
    Metadata serverHeadersCopy = new Metadata();
    serverHeadersCopy.merge(serverHeaders);
    serverStream.writeHeaders(serverHeaders, true);
    Metadata headers = clientStreamListener.awaitHeaders(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertNotNull(headers);
    assertAsciiMetadataValuesEqual(serverHeadersCopy.getAll(asciiKey), headers.getAll(asciiKey));
    assertEquals(
        Lists.newArrayList(serverHeadersCopy.getAll(binaryKey)),
        Lists.newArrayList(headers.getAll(binaryKey)));
    // --- Response message: server -> client, with tracer event and size checks.
    clientStream.request(1);
    assertTrue(serverStreamListener.awaitOnReadyAndDrain(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertTrue(serverStream.isReady());
    serverStream.writeMessage(methodDescriptor.streamResponse("Hi. Who are you?"));
    assertThat(serverStreamTracer1.nextOutboundEvent()).isEqualTo("outboundMessage(0)");
    serverStream.flush();
    message = clientStreamListener.messageQueue.poll(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertNotNull("message expected", message);
    assertThat(serverStreamTracer1.nextOutboundEvent())
        .matches("outboundMessageSent\\(0, -?[0-9]+, -?[0-9]+\\)");
    if (isEnabledSupportTracingMessageSizes()) {
      assertThat(serverStreamTracer1.getOutboundWireSize()).isGreaterThan(0L);
      assertThat(serverStreamTracer1.getOutboundUncompressedSize()).isGreaterThan(0L);
    }
    assertTrue(clientStreamTracer1.getInboundHeaders());
    assertThat(clientStreamTracer1.nextInboundEvent()).isEqualTo("inboundMessage(0)");
    assertEquals("Hi. Who are you?", methodDescriptor.parseResponse(message));
    assertThat(clientStreamTracer1.nextInboundEvent())
        .matches("inboundMessageRead\\(0, -?[0-9]+, -?[0-9]+\\)");
    if (isEnabledSupportTracingMessageSizes()) {
      assertThat(clientStreamTracer1.getInboundWireSize()).isGreaterThan(0L);
      assertThat(clientStreamTracer1.getInboundUncompressedSize()).isGreaterThan(0L);
    }
    message.close();
    assertNull("no additional message expected", clientStreamListener.messageQueue.poll());
    // --- Normal close with OK status and duplicate/binary trailer values.
    Status status = Status.OK.withDescription("That was normal");
    Metadata trailers = new Metadata();
    trailers.put(asciiKey, "trailers");
    trailers.put(asciiKey, "dupvalue");
    trailers.put(asciiKey, "dupvalue");
    trailers.put(binaryKey, "äbinarytrailers");
    trailers.put(binaryKey, "dup,value");
    serverStream.close(status, trailers);
    // No further tracer events are expected on the server side after close.
    assertNull(serverStreamTracer1.nextInboundEvent());
    assertNull(serverStreamTracer1.nextOutboundEvent());
    assertCodeEquals(Status.OK, serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertSame(status, serverStreamTracer1.getStatus());
    Status clientStreamStatus = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    Metadata clientStreamTrailers =
        clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertSame(clientStreamTrailers, clientStreamTracer1.getInboundTrailers());
    assertSame(clientStreamStatus, clientStreamTracer1.getStatus());
    assertNull(clientStreamTracer1.nextInboundEvent());
    assertNull(clientStreamTracer1.nextOutboundEvent());
    assertEquals(status.getCode(), clientStreamStatus.getCode());
    assertEquals(status.getDescription(), clientStreamStatus.getDescription());
    assertAsciiMetadataValuesEqual(
        trailers.getAll(asciiKey), clientStreamTrailers.getAll(asciiKey));
    assertEquals(
        Lists.newArrayList(trailers.getAll(binaryKey)),
        Lists.newArrayList(clientStreamTrailers.getAll(binaryKey)));
  }
@Test
@SuppressWarnings("deprecation")
public void authorityPropagation() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
startTransport(client, mockClientTransportListener);
MockServerTransportListener serverTransportListener
= serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
Metadata clientHeaders = new Metadata();
ClientStream clientStream = client.newStream(
methodDescriptor, clientHeaders, callOptions, tracers);
ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
clientStream.start(clientStreamListener);
StreamCreation serverStreamCreation
= serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
ServerStream serverStream = serverStreamCreation.stream;
assertEquals(testAuthority(server), serverStream.getAuthority());
}
private void assertAsciiMetadataValuesEqual(Iterable<String> expected, Iterable<String> actural) {
StringBuilder sbExpected = new StringBuilder();
for (String str : expected) {
sbExpected.append(str).append(",");
}
StringBuilder sbActual = new StringBuilder();
for (String str : actural) {
sbActual.append(str).append(",");
}
assertEquals(sbExpected.toString(), sbActual.toString());
}
  /**
   * Verifies a complete RPC that exchanges no messages at all: half-close, headers, then an OK
   * close, with the tracers recording headers, trailers, and final status on both sides.
   */
  @Test
  public void zeroMessageStream() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStream serverStream = serverStreamCreation.stream;
    ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
    // Client half-closes without sending any message.
    clientStream.halfClose();
    assertTrue(serverStreamListener.awaitHalfClosed(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    serverStream.writeHeaders(new Metadata(), true);
    assertNotNull(clientStreamListener.awaitHeaders(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    Status status = Status.OK.withDescription("Nice talking to you");
    serverStream.close(status, new Metadata());
    assertCodeEquals(Status.OK, serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    Status clientStreamStatus = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    Metadata clientStreamTrailers =
        clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertNotNull(clientStreamTrailers);
    // The client sees the server's status code and description, and the tracers record the exact
    // trailer/status instances delivered to the listeners.
    assertEquals(status.getCode(), clientStreamStatus.getCode());
    assertEquals(status.getDescription(), clientStreamStatus.getDescription());
    assertTrue(clientStreamTracer1.getOutboundHeaders());
    assertTrue(clientStreamTracer1.getInboundHeaders());
    assertSame(clientStreamTrailers, clientStreamTracer1.getInboundTrailers());
    assertSame(clientStreamStatus, clientStreamTracer1.getStatus());
    assertSame(status, serverStreamTracer1.getStatus());
  }
  /**
   * Verifies that when the server sends headers and then immediately closes (before the client
   * half-closes), the client receives a status matching the server's, with the cause stripped
   * (checked via {@code checkClientStatus}).
   */
  @Test
  public void earlyServerClose_withServerHeaders() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStream serverStream = serverStreamCreation.stream;
    ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
    serverStream.writeHeaders(new Metadata(), true);
    assertNotNull(clientStreamListener.awaitHeaders(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    // The cause attached here must not be transmitted to the client.
    Status strippedStatus = Status.OK.withDescription("Hello. Goodbye.");
    Status status = strippedStatus.withCause(new Exception());
    serverStream.close(status, new Metadata());
    assertCodeEquals(Status.OK, serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    Status clientStreamStatus = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    Metadata clientStreamTrailers =
        clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertNotNull(clientStreamTrailers);
    checkClientStatus(status, clientStreamStatus);
    assertTrue(clientStreamTracer1.getOutboundHeaders());
    assertTrue(clientStreamTracer1.getInboundHeaders());
    assertSame(clientStreamTrailers, clientStreamTracer1.getInboundTrailers());
    assertSame(clientStreamStatus, clientStreamTracer1.getStatus());
    assertSame(status, serverStreamTracer1.getStatus());
  }
  /**
   * Verifies that when the server closes without ever sending headers, the client still receives
   * the status and the full trailers (duplicate ASCII and binary values included).
   */
  @Test
  public void earlyServerClose_noServerHeaders() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStream serverStream = serverStreamCreation.stream;
    ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
    // The cause attached here must not be transmitted to the client.
    Status strippedStatus = Status.OK.withDescription("Hellogoodbye");
    Status status = strippedStatus.withCause(new Exception());
    Metadata trailers = new Metadata();
    trailers.put(asciiKey, "trailers");
    trailers.put(asciiKey, "dupvalue");
    trailers.put(asciiKey, "dupvalue");
    trailers.put(binaryKey, "äbinarytrailers");
    // Close with trailers but without having written headers.
    serverStream.close(status, trailers);
    assertCodeEquals(Status.OK, serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    Status clientStreamStatus = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    Metadata clientStreamTrailers =
        clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    checkClientStatus(status, clientStreamStatus);
    assertEquals(
        Lists.newArrayList(trailers.getAll(asciiKey)),
        Lists.newArrayList(clientStreamTrailers.getAll(asciiKey)));
    assertEquals(
        Lists.newArrayList(trailers.getAll(binaryKey)),
        Lists.newArrayList(clientStreamTrailers.getAll(binaryKey)));
    assertTrue(clientStreamTracer1.getOutboundHeaders());
    assertSame(clientStreamTrailers, clientStreamTracer1.getInboundTrailers());
    assertSame(clientStreamStatus, clientStreamTracer1.getStatus());
    assertSame(status, serverStreamTracer1.getStatus());
  }
  /**
   * Verifies that a server-side failure close (non-OK status, no headers, no messages) is
   * delivered to the client with the cause stripped, while the server listener itself still
   * observes an OK close.
   */
  @Test
  public void earlyServerClose_serverFailure() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStream serverStream = serverStreamCreation.stream;
    ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
    // The cause attached here must not be transmitted to the client.
    Status strippedStatus = Status.INTERNAL.withDescription("I'm not listening");
    Status status = strippedStatus.withCause(new Exception());
    serverStream.close(status, new Metadata());
    assertCodeEquals(Status.OK, serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    Status clientStreamStatus = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    Metadata clientStreamTrailers =
        clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertNotNull(clientStreamTrailers);
    checkClientStatus(status, clientStreamStatus);
    assertTrue(clientStreamTracer1.getOutboundHeaders());
    assertSame(clientStreamTrailers, clientStreamTracer1.getInboundTrailers());
    assertSame(clientStreamStatus, clientStreamTracer1.getStatus());
    assertSame(status, serverStreamTracer1.getStatus());
  }
  /**
   * Same as {@code earlyServerClose_serverFailure}, but the client listener reacts to
   * {@code closed()} by cancelling the stream, simulating blocking stubs that cancel on failure.
   * The cancel must not disturb the status/trailers already delivered.
   */
  @Test
  public void earlyServerClose_serverFailure_withClientCancelOnListenerClosed() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    final ClientStream clientStream =
        client.newStream(
            methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase() {
      @Override
      public void closed(
          Status status, RpcProgress rpcProgress, Metadata trailers) {
        super.closed(status, rpcProgress, trailers);
        // This simulates the blocking calls which can trigger clientStream.cancel().
        clientStream.cancel(Status.CANCELLED.withCause(status.asRuntimeException()));
      }
    };
    clientStream.start(clientStreamListener);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStream serverStream = serverStreamCreation.stream;
    ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
    // The cause attached here must not be transmitted to the client.
    Status strippedStatus = Status.INTERNAL.withDescription("I'm not listening");
    Status status = strippedStatus.withCause(new Exception());
    serverStream.close(status, new Metadata());
    assertCodeEquals(Status.OK, serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    Status clientStreamStatus = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    Metadata clientStreamTrailers =
        clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertNotNull(clientStreamTrailers);
    checkClientStatus(status, clientStreamStatus);
    assertTrue(clientStreamTracer1.getOutboundHeaders());
    assertSame(clientStreamTrailers, clientStreamTracer1.getInboundTrailers());
    assertSame(clientStreamStatus, clientStreamTracer1.getStatus());
    assertSame(status, serverStreamTracer1.getStatus());
  }
  /**
   * Verifies client-initiated cancellation: the client listener gets the exact cancel status, the
   * server sees a non-OK close without the client's cause, and a redundant second cancel is a
   * harmless no-op.
   */
  @Test
  public void clientCancel() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
    Status status = Status.CANCELLED.withDescription("Nevermind").withCause(new Exception());
    clientStream.cancel(status);
    assertEquals(status, clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    Status serverStatus = serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertNotEquals(Status.Code.OK, serverStatus.getCode());
    // Cause should not be transmitted between client and server by default
    assertNull(serverStatus.getCause());
    // A second cancel after the stream is already closed must be a safe no-op.
    clientStream.cancel(status);
    assertTrue(clientStreamTracer1.getOutboundHeaders());
    assertNull(clientStreamTracer1.getInboundTrailers());
    assertSame(status, clientStreamTracer1.getStatus());
    assertSame(serverStatus, serverStreamTracer1.getStatus());
  }
  /**
   * Verifies that the client may cancel the stream from inside its own
   * {@code messagesAvailable} callback (i.e. while a message is being delivered) without
   * deadlocking, and that the listener's {@code closed} then reports the cancel status.
   */
  @Test
  public void clientCancelFromWithinMessageRead() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    final SettableFuture<Boolean> closedCalled = SettableFuture.create();
    final ClientStream clientStream =
        client.newStream(
            methodDescriptor, new Metadata(), callOptions, tracers);
    final Status status = Status.CANCELLED.withDescription("nevermind");
    // Inline listener: cancels the stream as soon as the first message arrives, and records in
    // closedCalled that closed() was invoked with the expected cancel status.
    clientStream.start(new ClientStreamListener() {
      private boolean messageReceived = false;

      @Override
      public void headersRead(Metadata headers) {
      }

      @Override
      public void closed(
          Status status, RpcProgress rpcProgress, Metadata trailers) {
        assertEquals(Status.CANCELLED.getCode(), status.getCode());
        assertEquals("nevermind", status.getDescription());
        closedCalled.set(true);
      }

      @Override
      public void messagesAvailable(MessageProducer producer) {
        InputStream message;
        while ((message = producer.next()) != null) {
          assertFalse("too many messages received", messageReceived);
          messageReceived = true;
          assertEquals("foo", methodDescriptor.parseResponse(message));
          // Cancel from within the delivery callback — the behavior under test.
          clientStream.cancel(status);
        }
      }

      @Override
      public void onReady() {
      }
    });
    clientStream.halfClose();
    clientStream.request(1);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertEquals(methodDescriptor.getFullMethodName(), serverStreamCreation.method);
    ServerStream serverStream = serverStreamCreation.stream;
    ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
    assertTrue(serverStreamListener.awaitOnReadyAndDrain(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertTrue(serverStream.isReady());
    serverStream.writeHeaders(new Metadata(), true);
    serverStream.writeMessage(methodDescriptor.streamRequest("foo"));
    serverStream.flush();
    // Block until closedCalled was set.
    closedCalled.get(5, TimeUnit.SECONDS);
    serverStream.close(Status.OK, new Metadata());
    assertTrue(clientStreamTracer1.getOutboundHeaders());
    assertTrue(clientStreamTracer1.getInboundHeaders());
    if (isEnabledSupportTracingMessageSizes()) {
      assertThat(clientStreamTracer1.getInboundWireSize()).isGreaterThan(0L);
      assertThat(clientStreamTracer1.getInboundUncompressedSize()).isGreaterThan(0L);
      assertThat(serverStreamTracer1.getOutboundWireSize()).isGreaterThan(0L);
      assertThat(serverStreamTracer1.getOutboundUncompressedSize()).isGreaterThan(0L);
    }
    assertNull(clientStreamTracer1.getInboundTrailers());
    assertSame(status, clientStreamTracer1.getStatus());
    // There is a race between client cancelling and server closing. The final status seen by the
    // server is non-deterministic.
    assertTrue(serverStreamTracer1.await(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertNotNull(serverStreamTracer1.getStatus());
  }
  /**
   * Cancels the RPC from the server side. Verifies the server listener sees the exact
   * cancellation status, the client is closed with CANCELLED (without receiving the server's
   * cause), and a second cancel triggers no additional callbacks.
   */
  @Test
  public void serverCancel() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStream serverStream = serverStreamCreation.stream;
    ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
    Status status = Status.DEADLINE_EXCEEDED.withDescription("It was bound to happen")
        .withCause(new Exception());
    serverStream.cancel(status);
    // The server's own listener receives the very status object it cancelled with.
    assertEquals(status, serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    Status clientStreamStatus = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    // Presently we can't send much back to the client in this case. Verify that is the current
    // behavior for consistency between transports.
    assertCodeEquals(Status.CANCELLED, clientStreamStatus);
    // Cause should not be transmitted between server and client
    assertNull(clientStreamStatus.getCause());
    assertTrue(clientStreamTracer1.getOutboundHeaders());
    assertNull(clientStreamTracer1.getInboundTrailers());
    assertSame(clientStreamStatus, clientStreamTracer1.getStatus());
    verify(serverStreamTracerFactory).newServerStreamTracer(anyString(), any(Metadata.class));
    assertSame(status, serverStreamTracer1.getStatus());
    // Second cancellation shouldn't trigger additional callbacks
    serverStream.cancel(status);
    doPingPong(serverListener);
  }
@Test
public void flowControlPushBack() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
startTransport(client, mockClientTransportListener);
MockServerTransportListener serverTransportListener =
serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
serverTransport = serverTransportListener.transport;
ClientStream clientStream =
client.newStream(methodDescriptor, new Metadata(), callOptions, tracers);
ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
clientStream.start(clientStreamListener);
StreamCreation serverStreamCreation =
serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
assertEquals(methodDescriptor.getFullMethodName(), serverStreamCreation.method);
ServerStream serverStream = serverStreamCreation.stream;
ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
serverStream.writeHeaders(new Metadata(), true);
String largeMessage = newString(1024);
serverStream.request(1);
assertTrue(clientStreamListener.awaitOnReadyAndDrain(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertTrue(clientStream.isReady());
final int maxToSend = 10 * 1024;
int clientSent;
// Verify that flow control will push back on client.
for (clientSent = 0; clientStream.isReady(); clientSent++) {
if (clientSent > maxToSend) {
// It seems like flow control isn't working. _Surely_ flow control would have pushed-back
// already. If this is normal, please configure the transport to buffer less.
fail("Too many messages sent before isReady() returned false");
}
clientStream.writeMessage(methodDescriptor.streamRequest(largeMessage));
clientStream.flush();
}
assertTrue(clientSent > 0);
// Make sure there are at least a few messages buffered.
for (; clientSent < 5; clientSent++) {
clientStream.writeMessage(methodDescriptor.streamResponse(largeMessage));
clientStream.flush();
}
doPingPong(serverListener);
int serverReceived = verifyMessageCountAndClose(serverStreamListener.messageQueue, 1);
clientStream.request(1);
assertTrue(serverStreamListener.awaitOnReadyAndDrain(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertTrue(serverStream.isReady());
int serverSent;
// Verify that flow control will push back on server.
for (serverSent = 0; serverStream.isReady(); serverSent++) {
if (serverSent > maxToSend) {
// It seems like flow control isn't working. _Surely_ flow control would have pushed-back
// already. If this is normal, please configure the transport to buffer less.
fail("Too many messages sent before isReady() returned false");
}
serverStream.writeMessage(methodDescriptor.streamResponse(largeMessage));
serverStream.flush();
}
assertTrue(serverSent > 0);
// Make sure there are at least a few messages buffered.
for (; serverSent < 5; serverSent++) {
serverStream.writeMessage(methodDescriptor.streamResponse(largeMessage));
serverStream.flush();
}
doPingPong(serverListener);
int clientReceived = verifyMessageCountAndClose(clientStreamListener.messageQueue, 1);
serverStream.request(3);
clientStream.request(3);
doPingPong(serverListener);
clientReceived += verifyMessageCountAndClose(clientStreamListener.messageQueue, 3);
serverReceived += verifyMessageCountAndClose(serverStreamListener.messageQueue, 3);
// Request the rest
serverStream.request(clientSent);
clientStream.request(serverSent);
clientReceived +=
verifyMessageCountAndClose(clientStreamListener.messageQueue, serverSent - clientReceived);
serverReceived +=
verifyMessageCountAndClose(serverStreamListener.messageQueue, clientSent - serverReceived);
assertTrue(clientStreamListener.awaitOnReadyAndDrain(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertTrue(clientStream.isReady());
assertTrue(serverStreamListener.awaitOnReadyAndDrain(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertTrue(serverStream.isReady());
// Request four more
for (int i = 0; i < 5; i++) {
clientStream.writeMessage(methodDescriptor.streamRequest(largeMessage));
clientStream.flush();
serverStream.writeMessage(methodDescriptor.streamResponse(largeMessage));
serverStream.flush();
}
doPingPong(serverListener);
clientReceived += verifyMessageCountAndClose(clientStreamListener.messageQueue, 4);
serverReceived += verifyMessageCountAndClose(serverStreamListener.messageQueue, 4);
// Drain exactly how many messages are left
serverStream.request(1);
clientStream.request(1);
clientReceived += verifyMessageCountAndClose(clientStreamListener.messageQueue, 1);
serverReceived += verifyMessageCountAndClose(serverStreamListener.messageQueue, 1);
// And now check that the streams can still complete gracefully
clientStream.writeMessage(methodDescriptor.streamRequest(largeMessage));
clientStream.flush();
clientStream.halfClose();
doPingPong(serverListener);
assertFalse(serverStreamListener.isHalfClosed());
serverStream.request(1);
serverReceived += verifyMessageCountAndClose(serverStreamListener.messageQueue, 1);
assertEquals(clientSent + 6, serverReceived);
assertTrue(serverStreamListener.awaitHalfClosed(TIMEOUT_MS, TimeUnit.MILLISECONDS));
serverStream.writeMessage(methodDescriptor.streamResponse(largeMessage));
serverStream.flush();
Status status = Status.OK.withDescription("... quite a lengthy discussion");
serverStream.close(status, new Metadata());
doPingPong(serverListener);
assertFalse(clientStreamListener.isClosed());
clientStream.request(1);
clientReceived += verifyMessageCountAndClose(clientStreamListener.messageQueue, 1);
assertEquals(serverSent + 6, clientReceived);
assertCodeEquals(Status.OK, serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
Status clientStreamStatus = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertEquals(status.getCode(), clientStreamStatus.getCode());
assertEquals(status.getDescription(), clientStreamStatus.getDescription());
}
  /**
   * A single message larger than the flow-control window (TEST_FLOW_CONTROL_WINDOW + 1 bytes)
   * must still be deliverable in each direction without deadlocking, and the streams must be
   * able to complete normally afterwards.
   */
  @Test
  public void flowControlDoesNotDeadlockLargeMessage() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener =
        serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    ClientStream clientStream =
        client.newStream(methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    StreamCreation serverStreamCreation =
        serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertEquals(methodDescriptor.getFullMethodName(), serverStreamCreation.method);
    ServerStream serverStream = serverStreamCreation.stream;
    ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
    serverStream.writeHeaders(new Metadata(), true);
    // One byte larger than the window forces the transport to split/stage the message.
    String largeMessage = newString(TEST_FLOW_CONTROL_WINDOW + 1);
    serverStream.request(1);
    assertTrue(clientStreamListener.awaitOnReadyAndDrain(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertTrue(clientStream.isReady());
    // Client -> server direction.
    clientStream.writeMessage(methodDescriptor.streamRequest(largeMessage));
    clientStream.flush();
    doPingPong(serverListener);
    verifyMessageCountAndClose(serverStreamListener.messageQueue, 1);
    clientStream.request(1);
    assertTrue(serverStreamListener.awaitOnReadyAndDrain(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertTrue(serverStream.isReady());
    // Server -> client direction.
    serverStream.writeMessage(methodDescriptor.streamResponse(largeMessage));
    serverStream.flush();
    doPingPong(serverListener);
    verifyMessageCountAndClose(clientStreamListener.messageQueue, 1);
    // And now check that the streams can still complete normally.
    clientStream.halfClose();
    doPingPong(serverListener);
    serverStream.request(1);
    assertTrue(serverStreamListener.awaitHalfClosed(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    Status status = Status.OK.withDescription("... quite a lengthy discussion");
    serverStream.close(status, new Metadata());
    doPingPong(serverListener);
    clientStream.request(1);
    assertCodeEquals(Status.OK, serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    Status clientStreamStatus = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertEquals(status.getCode(), clientStreamStatus.getCode());
    assertEquals(status.getDescription(), clientStreamStatus.getDescription());
  }
private int verifyMessageCountAndClose(BlockingQueue<InputStream> messageQueue, int count)
throws Exception {
InputStream message;
for (int i = 0; i < count; i++) {
message = messageQueue.poll(TIMEOUT_MS, TimeUnit.MILLISECONDS);
assertNotNull(message);
message.close();
}
assertNull("no additional message expected", messageQueue.poll());
return count;
}
@Test
public void messageProducerOnlyProducesRequestedMessages() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
startTransport(client, mockClientTransportListener);
MockServerTransportListener serverTransportListener =
serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
serverTransport = serverTransportListener.transport;
// Start an RPC.
ClientStream clientStream = client.newStream(
methodDescriptor, new Metadata(), callOptions, tracers);
ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
clientStream.start(clientStreamListener);
StreamCreation serverStreamCreation =
serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
assertEquals(methodDescriptor.getFullMethodName(), serverStreamCreation.method);
// Have the client send two messages.
clientStream.writeMessage(methodDescriptor.streamRequest("MESSAGE"));
clientStream.writeMessage(methodDescriptor.streamRequest("MESSAGE"));
clientStream.flush();
doPingPong(serverListener);
// Verify server only receives one message if that's all it requests.
serverStreamCreation.stream.request(1);
verifyMessageCountAndClose(serverStreamCreation.listener.messageQueue, 1);
}
@Test
public void interactionsAfterServerStreamCloseAreNoops() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
startTransport(client, mockClientTransportListener);
MockServerTransportListener serverTransportListener
= serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
serverTransport = serverTransportListener.transport;
// boilerplate
ClientStream clientStream =
client.newStream(
methodDescriptor, new Metadata(), callOptions, tracers);
ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
clientStream.start(clientStreamListener);
StreamCreation server
= serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
// setup
clientStream.request(1);
server.stream.close(Status.INTERNAL, new Metadata());
assertNotNull(clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
// Ensure that for a closed ServerStream, interactions are noops
server.stream.writeHeaders(new Metadata(), true);
server.stream.writeMessage(methodDescriptor.streamResponse("response"));
server.stream.close(Status.INTERNAL, new Metadata());
// Make sure new streams still work properly
doPingPong(serverListener);
}
@Test
public void interactionsAfterClientStreamCancelAreNoops() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
startTransport(client, mockClientTransportListener);
MockServerTransportListener serverTransportListener
= serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
serverTransport = serverTransportListener.transport;
// boilerplate
ClientStream clientStream =
client.newStream(
methodDescriptor, new Metadata(), callOptions, tracers);
ClientStreamListener clientListener = mock(ClientStreamListener.class);
clientStream.start(clientListener);
StreamCreation server
= serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
// setup
server.stream.request(1);
clientStream.cancel(Status.UNKNOWN);
assertNotNull(server.listener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
// Ensure that for a cancelled ClientStream, interactions are noops
clientStream.writeMessage(methodDescriptor.streamRequest("request"));
clientStream.halfClose();
clientStream.cancel(Status.UNKNOWN);
// Make sure new streams still work properly
doPingPong(serverListener);
}
  /**
   * Whether this transport implementation reports transport-level stats. Subclasses override to
   * return {@code true}; the transportTracer_* tests return early when this is {@code false}.
   */
  // Not all transports support the tracer yet
  protected boolean haveTransportTracer() {
    return false;
  }
  /**
   * Verifies that starting streams increments {@code streamsStarted} on both client and server
   * stats, and that the last-stream-created timestamps track the (fake) clock, including after
   * advancing the clock between two streams.
   */
  @Test
  public void transportTracer_streamStarted() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    if (!haveTransportTracer()) {
      return;
    }
    // start first stream
    long serverFirstTimestampNanos;
    long clientFirstTimestampNanos;
    {
      // Before any stream, counters and timestamps must be zero on both sides.
      TransportStats serverBefore = getTransportStats(serverTransportListener.transport);
      assertEquals(0, serverBefore.streamsStarted);
      assertEquals(0, serverBefore.lastRemoteStreamCreatedTimeNanos);
      TransportStats clientBefore = getTransportStats(client);
      assertEquals(0, clientBefore.streamsStarted);
      assertEquals(0, clientBefore.lastRemoteStreamCreatedTimeNanos);
      ClientStream clientStream = client.newStream(
          methodDescriptor, new Metadata(), callOptions, tracers);
      ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
      clientStream.start(clientStreamListener);
      StreamCreation serverStreamCreation = serverTransportListener
          .takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
      TransportStats serverAfter = getTransportStats(serverTransportListener.transport);
      assertEquals(1, serverAfter.streamsStarted);
      serverFirstTimestampNanos = serverAfter.lastRemoteStreamCreatedTimeNanos;
      assertEquals(fakeCurrentTimeNanos(), serverAfter.lastRemoteStreamCreatedTimeNanos);
      TransportStats clientAfter = getTransportStats(client);
      assertEquals(1, clientAfter.streamsStarted);
      clientFirstTimestampNanos = clientAfter.lastLocalStreamCreatedTimeNanos;
      assertEquals(fakeCurrentTimeNanos(), clientFirstTimestampNanos);
      ServerStream serverStream = serverStreamCreation.stream;
      serverStream.close(Status.OK, new Metadata());
    }
    // Move the fake clock forward so the second stream's timestamp must differ.
    final long elapsedMillis = 100;
    advanceClock(100, TimeUnit.MILLISECONDS);
    // start second stream
    {
      TransportStats serverBefore = getTransportStats(serverTransportListener.transport);
      assertEquals(1, serverBefore.streamsStarted);
      TransportStats clientBefore = getTransportStats(client);
      assertEquals(1, clientBefore.streamsStarted);
      ClientStream clientStream = client.newStream(
          methodDescriptor, new Metadata(), callOptions, noopTracers);
      ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
      clientStream.start(clientStreamListener);
      StreamCreation serverStreamCreation = serverTransportListener
          .takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
      TransportStats serverAfter = getTransportStats(serverTransportListener.transport);
      assertEquals(2, serverAfter.streamsStarted);
      // Timestamp delta must equal exactly the time we advanced the clock by.
      assertEquals(
          TimeUnit.MILLISECONDS.toNanos(elapsedMillis),
          serverAfter.lastRemoteStreamCreatedTimeNanos - serverFirstTimestampNanos);
      assertEquals(fakeCurrentTimeNanos(), serverAfter.lastRemoteStreamCreatedTimeNanos);
      TransportStats clientAfter = getTransportStats(client);
      assertEquals(2, clientAfter.streamsStarted);
      assertEquals(
          TimeUnit.MILLISECONDS.toNanos(elapsedMillis),
          clientAfter.lastLocalStreamCreatedTimeNanos - clientFirstTimestampNanos);
      assertEquals(fakeCurrentTimeNanos(), clientAfter.lastLocalStreamCreatedTimeNanos);
      ServerStream serverStream = serverStreamCreation.stream;
      serverStream.close(Status.OK, new Metadata());
    }
  }
  /**
   * A stream closed by the server with OK after the client half-closes must increment
   * {@code streamsSucceeded} (and not {@code streamsFailed}) on both client and server stats.
   */
  @Test
  public void transportTracer_server_streamEnded_ok() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStream serverStream = serverStreamCreation.stream;
    if (!haveTransportTracer()) {
      return;
    }
    TransportStats serverBefore = getTransportStats(serverTransportListener.transport);
    assertEquals(0, serverBefore.streamsSucceeded);
    assertEquals(0, serverBefore.streamsFailed);
    TransportStats clientBefore = getTransportStats(client);
    assertEquals(0, clientBefore.streamsSucceeded);
    assertEquals(0, clientBefore.streamsFailed);
    clientStream.halfClose();
    serverStream.close(Status.OK, new Metadata());
    // do not validate stats until close() has been called on client
    assertNotNull(clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    TransportStats serverAfter = getTransportStats(serverTransportListener.transport);
    assertEquals(1, serverAfter.streamsSucceeded);
    assertEquals(0, serverAfter.streamsFailed);
    TransportStats clientAfter = getTransportStats(client);
    assertEquals(1, clientAfter.streamsSucceeded);
    assertEquals(0, clientAfter.streamsFailed);
  }
  /**
   * A stream closed by the server with a non-OK status must increment {@code streamsFailed}
   * (and not {@code streamsSucceeded}) on both client and server stats.
   */
  @Test
  public void transportTracer_server_streamEnded_nonOk() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStream serverStream = serverStreamCreation.stream;
    if (!haveTransportTracer()) {
      return;
    }
    TransportStats serverBefore = getTransportStats(serverTransportListener.transport);
    assertEquals(0, serverBefore.streamsFailed);
    assertEquals(0, serverBefore.streamsSucceeded);
    TransportStats clientBefore = getTransportStats(client);
    assertEquals(0, clientBefore.streamsFailed);
    assertEquals(0, clientBefore.streamsSucceeded);
    serverStream.close(Status.UNKNOWN, new Metadata());
    // do not validate stats until close() has been called on client
    assertNotNull(clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    TransportStats serverAfter = getTransportStats(serverTransportListener.transport);
    assertEquals(1, serverAfter.streamsFailed);
    assertEquals(0, serverAfter.streamsSucceeded);
    TransportStats clientAfter = getTransportStats(client);
    assertEquals(1, clientAfter.streamsFailed);
    assertEquals(0, clientAfter.streamsSucceeded);
    // NOTE(review): only this tracer test shuts the client transport down explicitly —
    // presumably to exercise teardown after a failed stream; confirm against sibling tests.
    client.shutdown(Status.UNAVAILABLE);
  }
  /**
   * A stream cancelled by the client with a non-OK status must increment {@code streamsFailed}
   * (and not {@code streamsSucceeded}) on both client and server stats.
   */
  @Test
  public void transportTracer_client_streamEnded_nonOk() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    MockServerTransportListener serverTransportListener =
        serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    StreamCreation serverStreamCreation =
        serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    if (!haveTransportTracer()) {
      return;
    }
    TransportStats serverBefore = getTransportStats(serverTransportListener.transport);
    assertEquals(0, serverBefore.streamsFailed);
    assertEquals(0, serverBefore.streamsSucceeded);
    TransportStats clientBefore = getTransportStats(client);
    assertEquals(0, clientBefore.streamsFailed);
    assertEquals(0, clientBefore.streamsSucceeded);
    clientStream.cancel(Status.UNKNOWN);
    // do not validate stats until close() has been called on server
    assertNotNull(serverStreamCreation.listener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
    TransportStats serverAfter = getTransportStats(serverTransportListener.transport);
    assertEquals(1, serverAfter.streamsFailed);
    assertEquals(0, serverAfter.streamsSucceeded);
    TransportStats clientAfter = getTransportStats(client);
    assertEquals(1, clientAfter.streamsFailed);
    assertEquals(0, clientAfter.streamsSucceeded);
  }
  /**
   * Sending one message client-to-server must increment the server's {@code messagesReceived}
   * and the client's {@code messagesSent}, with both last-message timestamps matching the fake
   * clock.
   */
  @Test
  public void transportTracer_server_receive_msg() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStream serverStream = serverStreamCreation.stream;
    ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
    if (!haveTransportTracer()) {
      return;
    }
    TransportStats serverBefore = getTransportStats(serverTransportListener.transport);
    assertEquals(0, serverBefore.messagesReceived);
    assertEquals(0, serverBefore.lastMessageReceivedTimeNanos);
    TransportStats clientBefore = getTransportStats(client);
    assertEquals(0, clientBefore.messagesSent);
    assertEquals(0, clientBefore.lastMessageSentTimeNanos);
    serverStream.request(1);
    clientStream.writeMessage(methodDescriptor.streamRequest("request"));
    clientStream.flush();
    clientStream.halfClose();
    // Wait for the message to actually arrive before reading stats.
    verifyMessageCountAndClose(serverStreamListener.messageQueue, 1);
    TransportStats serverAfter = getTransportStats(serverTransportListener.transport);
    assertEquals(1, serverAfter.messagesReceived);
    assertEquals(fakeCurrentTimeNanos(), serverAfter.lastMessageReceivedTimeNanos);
    TransportStats clientAfter = getTransportStats(client);
    assertEquals(1, clientAfter.messagesSent);
    assertEquals(fakeCurrentTimeNanos(), clientAfter.lastMessageSentTimeNanos);
    serverStream.close(Status.OK, new Metadata());
  }
  /**
   * Sending one message server-to-client must increment the server's {@code messagesSent} and
   * the client's {@code messagesReceived}, with both last-message timestamps matching the fake
   * clock.
   */
  @Test
  public void transportTracer_server_send_msg() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStream serverStream = serverStreamCreation.stream;
    if (!haveTransportTracer()) {
      return;
    }
    TransportStats serverBefore = getTransportStats(serverTransportListener.transport);
    assertEquals(0, serverBefore.messagesSent);
    assertEquals(0, serverBefore.lastMessageSentTimeNanos);
    TransportStats clientBefore = getTransportStats(client);
    assertEquals(0, clientBefore.messagesReceived);
    assertEquals(0, clientBefore.lastMessageReceivedTimeNanos);
    clientStream.request(1);
    serverStream.writeHeaders(new Metadata(), true);
    serverStream.writeMessage(methodDescriptor.streamResponse("response"));
    serverStream.flush();
    // Wait for the message to actually arrive before reading stats.
    verifyMessageCountAndClose(clientStreamListener.messageQueue, 1);
    TransportStats serverAfter = getTransportStats(serverTransportListener.transport);
    assertEquals(1, serverAfter.messagesSent);
    assertEquals(fakeCurrentTimeNanos(), serverAfter.lastMessageSentTimeNanos);
    TransportStats clientAfter = getTransportStats(client);
    assertEquals(1, clientAfter.messagesReceived);
    assertEquals(fakeCurrentTimeNanos(), clientAfter.lastMessageReceivedTimeNanos);
    serverStream.close(Status.OK, new Metadata());
  }
  /**
   * Verifies channelz socket stats: each side's local address equals the peer's remote address
   * (read from {@code TRANSPORT_ATTR_REMOTE_ADDR}), and basic socket options are populated.
   */
  @Test
  public void socketStats() throws Exception {
    server.start(serverListener);
    ManagedClientTransport client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    ClientStream clientStream = client.newStream(
        methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    ServerStream serverStream = serverStreamCreation.stream;
    // Each stream's remote address is the other side's local address.
    SocketAddress serverAddress = clientStream.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR);
    SocketAddress clientAddress = serverStream.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR);
    SocketStats clientSocketStats = client.getStats().get();
    assertEquals(
        "clientLocal " + clientStream.getAttributes(), clientAddress, clientSocketStats.local);
    assertEquals(
        "clientRemote " + clientStream.getAttributes(), serverAddress, clientSocketStats.remote);
    // very basic sanity check that socket options are populated
    assertNotNull(clientSocketStats.socketOptions.lingerSeconds);
    assertTrue(clientSocketStats.socketOptions.others.containsKey("SO_SNDBUF"));
    SocketStats serverSocketStats = serverTransportListener.transport.getStats().get();
    assertEquals(
        "serverLocal " + serverStream.getAttributes(), serverAddress, serverSocketStats.local);
    assertEquals(
        "serverRemote " + serverStream.getAttributes(), clientAddress, serverSocketStats.remote);
    // very basic sanity check that socket options are populated
    assertNotNull(serverSocketStats.socketOptions.lingerSeconds);
    assertTrue(serverSocketStats.socketOptions.others.containsKey("SO_SNDBUF"));
  }
  /**
   * This assumes the server limits metadata size to GrpcUtil.DEFAULT_MAX_HEADER_LIST_SIZE.
   * Sends request headers at exactly that size and expects the server to reject the stream;
   * the exact rejection code is transport-specific, so several codes are accepted.
   */
  @Test
  public void serverChecksInboundMetadataSize() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    // A single binary header whose value alone reaches the header-list-size limit.
    Metadata tooLargeMetadata = new Metadata();
    tooLargeMetadata.put(
        Metadata.Key.of("foo-bin", Metadata.BINARY_BYTE_MARSHALLER),
        new byte[GrpcUtil.DEFAULT_MAX_HEADER_LIST_SIZE]);
    ClientStream clientStream = client.newStream(
        methodDescriptor, tooLargeMetadata, callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    clientStream.writeMessage(methodDescriptor.streamRequest("foo"));
    clientStream.halfClose();
    clientStream.request(1);
    // Server shouldn't have created a stream, so nothing to clean up on server-side
    // If this times out, the server probably isn't noticing the metadata size
    Status status = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    List<Status.Code> codeOptions = Arrays.asList(
        Status.Code.UNKNOWN, Status.Code.RESOURCE_EXHAUSTED, Status.Code.INTERNAL);
    if (!codeOptions.contains(status.getCode())) {
      fail("Status code was not expected: " + status);
    }
  }
  /**
   * This assumes the client limits metadata size to GrpcUtil.DEFAULT_MAX_HEADER_LIST_SIZE.
   * The server writes oversized response headers; the client must fail the stream (code is
   * transport-specific) and must not report headers to the listener.
   */
  @Test
  public void clientChecksInboundMetadataSize_header() throws Exception {
    server.start(serverListener);
    client = newClientTransport(server);
    startTransport(client, mockClientTransportListener);
    MockServerTransportListener serverTransportListener
        = serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverTransport = serverTransportListener.transport;
    // A single binary header whose value alone reaches the header-list-size limit.
    Metadata tooLargeMetadata = new Metadata();
    tooLargeMetadata.put(
        Metadata.Key.of("foo-bin", Metadata.BINARY_BYTE_MARSHALLER),
        new byte[GrpcUtil.DEFAULT_MAX_HEADER_LIST_SIZE]);
    ClientStream clientStream =
        client.newStream(
            methodDescriptor, new Metadata(), callOptions, tracers);
    ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
    clientStream.start(clientStreamListener);
    clientStream.writeMessage(methodDescriptor.streamRequest("foo"));
    clientStream.halfClose();
    clientStream.request(1);
    StreamCreation serverStreamCreation
        = serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    serverStreamCreation.stream.request(1);
    // The oversized metadata goes back to the client as response HEADERS.
    serverStreamCreation.stream.writeHeaders(tooLargeMetadata, true);
    serverStreamCreation.stream.writeMessage(methodDescriptor.streamResponse("response"));
    serverStreamCreation.stream.close(Status.OK, new Metadata());
    Status status = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
    List<Status.Code> codeOptions = Arrays.asList(
        Status.Code.UNKNOWN, Status.Code.RESOURCE_EXHAUSTED, Status.Code.INTERNAL);
    if (!codeOptions.contains(status.getCode())) {
      fail("Status code was not expected: " + status);
    }
    // The rejected headers must never have been surfaced to the listener.
    assertFalse(clientStreamListener.hasHeaders());
  }
/** This assumes the client limits metadata size to GrpcUtil.DEFAULT_MAX_HEADER_LIST_SIZE. */
@Test
public void clientChecksInboundMetadataSize_trailer() throws Exception {
server.start(serverListener);
client = newClientTransport(server);
startTransport(client, mockClientTransportListener);
MockServerTransportListener serverTransportListener
= serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
serverTransport = serverTransportListener.transport;
Metadata.Key<String> tellTaleKey
= Metadata.Key.of("tell-tale", Metadata.ASCII_STRING_MARSHALLER);
Metadata tooLargeMetadata = new Metadata();
tooLargeMetadata.put(tellTaleKey, "true");
tooLargeMetadata.put(
Metadata.Key.of("foo-bin", Metadata.BINARY_BYTE_MARSHALLER),
new byte[GrpcUtil.DEFAULT_MAX_HEADER_LIST_SIZE]);
ClientStream clientStream =
client.newStream(
methodDescriptor, new Metadata(), callOptions, tracers);
ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
clientStream.start(clientStreamListener);
clientStream.writeMessage(methodDescriptor.streamRequest("foo"));
clientStream.halfClose();
clientStream.request(1);
StreamCreation serverStreamCreation
= serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
serverStreamCreation.stream.request(1);
serverStreamCreation.stream.writeHeaders(new Metadata(), true);
serverStreamCreation.stream.writeMessage(methodDescriptor.streamResponse("response"));
serverStreamCreation.stream.close(Status.OK, tooLargeMetadata);
Status status = clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS);
List<Status.Code> codeOptions = Arrays.asList(
Status.Code.UNKNOWN, Status.Code.RESOURCE_EXHAUSTED, Status.Code.INTERNAL);
if (!codeOptions.contains(status.getCode())) {
fail("Status code was not expected: " + status);
}
Metadata metadata = clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS);
assertNull(metadata.get(tellTaleKey));
}
/**
* Helper that simply does an RPC. It can be used similar to a sleep for negative testing: to give
* time for actions _not_ to happen. Since it is based on doing an actual RPC with actual
* callbacks, it generally provides plenty of time for Runnables to execute. But it is also faster
* on faster machines and more reliable on slower machines.
*/
private void doPingPong(MockServerListener serverListener) throws Exception {
ManagedClientTransport client = newClientTransport(server);
ManagedClientTransport.Listener listener = mock(ManagedClientTransport.Listener.class);
startTransport(client, listener);
ClientStream clientStream = client.newStream(
methodDescriptor, new Metadata(), callOptions,
new ClientStreamTracer[] { new ClientStreamTracer() {} });
ClientStreamListenerBase clientStreamListener = new ClientStreamListenerBase();
clientStream.start(clientStreamListener);
MockServerTransportListener serverTransportListener
= serverListener.takeListenerOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
StreamCreation serverStreamCreation
= serverTransportListener.takeStreamOrFail(TIMEOUT_MS, TimeUnit.MILLISECONDS);
ServerStream serverStream = serverStreamCreation.stream;
ServerStreamListenerBase serverStreamListener = serverStreamCreation.listener;
serverStream.close(Status.OK, new Metadata());
assertNotNull(clientStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertNotNull(clientStreamListener.awaitTrailers(TIMEOUT_MS, TimeUnit.MILLISECONDS));
assertNotNull(serverStreamListener.awaitClose(TIMEOUT_MS, TimeUnit.MILLISECONDS));
client.shutdown(Status.UNAVAILABLE);
}
/**
* Only assert that the Status.Code matches, but provide the entire actual result in case the
* assertion fails.
*/
private static void assertCodeEquals(String message, Status expected, Status actual) {
if (expected == null) {
fail("expected should not be null");
}
if (actual == null || !expected.getCode().equals(actual.getCode())) {
assertEquals(message, expected, actual);
}
}
private static void assertCodeEquals(Status expected, Status actual) {
assertCodeEquals(null, expected, actual);
}
private static void assertStatusEquals(Status expected, Status actual) {
if (expected == null) {
fail("expected should not be null");
}
if (actual == null || !expected.getCode().equals(actual.getCode())
|| !Objects.equal(expected.getDescription(), actual.getDescription())
|| !Objects.equal(expected.getCause(), actual.getCause())) {
assertEquals(expected, actual);
}
}
/**
* Verifies that the client status is as expected. By default, the code and description should
* be present, and the cause should be stripped away.
*/
private static void checkClientStatus(Status expectedStatus, Status clientStreamStatus) {
if (!clientStreamStatus.isOk() && clientStreamStatus.getCode() != expectedStatus.getCode()) {
System.out.println("Full Status: " + clientStreamStatus);
}
assertEquals(expectedStatus.getCode(), clientStreamStatus.getCode());
assertEquals(expectedStatus.getDescription(), clientStreamStatus.getDescription());
assertNull(clientStreamStatus.getCause());
}
static boolean waitForFuture(Future<?> future, long timeout, TimeUnit unit)
throws InterruptedException {
try {
future.get(timeout, unit);
} catch (ExecutionException ex) {
throw new AssertionError(ex);
} catch (TimeoutException ex) {
return false;
}
return true;
}
protected static void runIfNotNull(Runnable runnable) {
if (runnable != null) {
runnable.run();
}
}
protected static void startTransport(
ManagedClientTransport clientTransport,
ManagedClientTransport.Listener listener) {
runIfNotNull(clientTransport.start(listener));
verify(listener, timeout(TIMEOUT_MS)).filterTransport(any());
verify(listener, timeout(TIMEOUT_MS)).transportReady();
}
private final | AbstractTransportTest |
java | dropwizard__dropwizard | dropwizard-jersey/src/main/java/io/dropwizard/jersey/jackson/JacksonFeature.java | {
"start": 376,
"end": 758
} | class ____ implements Feature {
private final ObjectMapper mapper;
public JacksonFeature(ObjectMapper mapper) {
this.mapper = mapper;
}
@Override
public boolean configure(FeatureContext context) {
context.register(new JacksonMessageBodyProvider(mapper), MessageBodyReader.class, MessageBodyWriter.class);
return true;
}
}
| JacksonFeature |
java | apache__camel | tests/camel-itest/src/test/java/org/apache/camel/itest/issues/BaseClass.java | {
"start": 973,
"end": 1200
} | class ____ implements MessageListener {
private static final Logger LOG = LoggerFactory.getLogger(BaseClass.class);
@Override
public void onMessage(Message message) {
LOG.info("base called");
}
}
| BaseClass |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/context/LifecycleAutoConfigurationTests.java | {
"start": 3385,
"end": 3709
} | class ____ {
@Bean(name = AbstractApplicationContext.LIFECYCLE_PROCESSOR_BEAN_NAME)
DefaultLifecycleProcessor customLifecycleProcessor() {
DefaultLifecycleProcessor processor = new DefaultLifecycleProcessor();
processor.setTimeoutPerShutdownPhase(5000);
return processor;
}
}
}
| LifecycleProcessorConfiguration |
java | apache__camel | components/camel-smpp/src/main/java/org/apache/camel/component/smpp/SmppSubmitSmCommand.java | {
"start": 1563,
"end": 10567
} | class ____ extends SmppSmCommand {
public SmppSubmitSmCommand(SMPPSession session, SmppConfiguration config) {
super(session, config);
}
@Override
public void execute(Exchange exchange) throws SmppException {
SubmitSm[] submitSms = createSubmitSm(exchange);
List<String> messageIDs = new ArrayList<>(submitSms.length);
String messageID = null;
for (int i = 0; i < submitSms.length; i++) {
SubmitSm submitSm = submitSms[i];
messageID = null;
if (log.isDebugEnabled()) {
log.debug("Sending short message {} for exchange id '{}'...", i, exchange.getExchangeId());
}
try {
SubmitSmResult result = session.submitShortMessage(
submitSm.getServiceType(),
TypeOfNumber.valueOf(submitSm.getSourceAddrTon()),
NumberingPlanIndicator.valueOf(submitSm.getSourceAddrNpi()),
submitSm.getSourceAddr(),
TypeOfNumber.valueOf(submitSm.getDestAddrTon()),
NumberingPlanIndicator.valueOf(submitSm.getDestAddrNpi()),
submitSm.getDestAddress(),
new ESMClass(submitSm.getEsmClass()),
submitSm.getProtocolId(),
submitSm.getPriorityFlag(),
submitSm.getScheduleDeliveryTime(),
submitSm.getValidityPeriod(),
new RegisteredDelivery(submitSm.getRegisteredDelivery()),
submitSm.getReplaceIfPresent(),
DataCodings.newInstance(submitSm.getDataCoding()),
(byte) 0,
submitSm.getShortMessage(),
submitSm.getOptionalParameters());
if (result != null) {
messageID = result.getMessageId();
}
} catch (Exception e) {
throw new SmppException(e);
}
if (messageID != null) {
messageIDs.add(messageID);
}
}
if (log.isDebugEnabled()) {
log.debug("Sent short message for exchange id '{}' and received message ids '{}'",
exchange.getExchangeId(), messageIDs);
}
Message message = ExchangeHelper.getResultMessage(exchange);
message.setHeader(SmppConstants.ID, messageIDs);
message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size());
}
protected SubmitSm[] createSubmitSm(Exchange exchange) throws SmppException {
Message message = exchange.getIn();
SubmitSm template = createSubmitSmTemplate(exchange);
byte[][] segments = splitBody(message);
// FIXME: undocumented header
ESMClass esmClass = message.getHeader(SmppConstants.ESM_CLASS, ESMClass.class);
if (esmClass != null) {
template.setEsmClass(esmClass.value());
} else if (segments.length > 1) {
// multipart message
template.setEsmClass(new ESMClass(MessageMode.DEFAULT, MessageType.DEFAULT, GSMSpecificFeature.UDHI).value());
}
SubmitSm[] submitSms = new SubmitSm[segments.length];
for (int i = 0; i < segments.length; i++) {
SubmitSm submitSm = SmppUtils.copySubmitSm(template);
submitSm.setShortMessage(segments[i]);
submitSms[i] = submitSm;
}
setRegisterDeliveryReceiptFlag(submitSms, message);
return submitSms;
}
protected void setRegisterDeliveryReceiptFlag(SubmitSm[] submitSms, Message message) {
byte specifiedDeliveryFlag = getRegisterDeliveryFlag(message);
byte flag;
if (getRequestsSingleDLR(message)) {
// Disable DLRs
flag = SMSCDeliveryReceipt.DEFAULT.value();
} else {
flag = specifiedDeliveryFlag;
}
for (int i = 0; i < submitSms.length - 1; i++) {
submitSms[i].setRegisteredDelivery(flag);
}
submitSms[submitSms.length - 1].setRegisteredDelivery(specifiedDeliveryFlag);
}
@SuppressWarnings({ "unchecked" })
protected SubmitSm createSubmitSmTemplate(Exchange exchange) {
Message in = exchange.getIn();
SubmitSm submitSm = new SubmitSm();
if (in.getHeaders().containsKey(SmppConstants.DATA_CODING)) {
submitSm.setDataCoding(in.getHeader(SmppConstants.DATA_CODING, Byte.class));
} else if (in.getHeaders().containsKey(SmppConstants.ALPHABET)) {
submitSm.setDataCoding(in.getHeader(SmppConstants.ALPHABET, Byte.class));
} else {
submitSm.setDataCoding(config.getDataCoding());
}
if (in.getHeaders().containsKey(SmppConstants.DEST_ADDR)) {
submitSm.setDestAddress(in.getHeader(SmppConstants.DEST_ADDR, String.class));
} else {
submitSm.setDestAddress(config.getDestAddr());
}
if (in.getHeaders().containsKey(SmppConstants.DEST_ADDR_TON)) {
submitSm.setDestAddrTon(in.getHeader(SmppConstants.DEST_ADDR_TON, Byte.class));
} else {
submitSm.setDestAddrTon(config.getDestAddrTon());
}
if (in.getHeaders().containsKey(SmppConstants.DEST_ADDR_NPI)) {
submitSm.setDestAddrNpi(in.getHeader(SmppConstants.DEST_ADDR_NPI, Byte.class));
} else {
submitSm.setDestAddrNpi(config.getDestAddrNpi());
}
if (in.getHeaders().containsKey(SmppConstants.SOURCE_ADDR)) {
submitSm.setSourceAddr(in.getHeader(SmppConstants.SOURCE_ADDR, String.class));
} else {
submitSm.setSourceAddr(config.getSourceAddr());
}
if (in.getHeaders().containsKey(SmppConstants.SOURCE_ADDR_TON)) {
submitSm.setSourceAddrTon(in.getHeader(SmppConstants.SOURCE_ADDR_TON, Byte.class));
} else {
submitSm.setSourceAddrTon(config.getSourceAddrTon());
}
if (in.getHeaders().containsKey(SmppConstants.SOURCE_ADDR_NPI)) {
submitSm.setSourceAddrNpi(in.getHeader(SmppConstants.SOURCE_ADDR_NPI, Byte.class));
} else {
submitSm.setSourceAddrNpi(config.getSourceAddrNpi());
}
if (in.getHeaders().containsKey(SmppConstants.SERVICE_TYPE)) {
submitSm.setServiceType(in.getHeader(SmppConstants.SERVICE_TYPE, String.class));
} else {
submitSm.setServiceType(config.getServiceType());
}
if (in.getHeaders().containsKey(SmppConstants.PROTOCOL_ID)) {
submitSm.setProtocolId(in.getHeader(SmppConstants.PROTOCOL_ID, Byte.class));
} else {
submitSm.setProtocolId(config.getProtocolId());
}
if (in.getHeaders().containsKey(SmppConstants.PRIORITY_FLAG)) {
submitSm.setPriorityFlag(in.getHeader(SmppConstants.PRIORITY_FLAG, Byte.class));
} else {
submitSm.setPriorityFlag(config.getPriorityFlag());
}
if (in.getHeaders().containsKey(SmppConstants.SCHEDULE_DELIVERY_TIME)) {
submitSm.setScheduleDeliveryTime(
SmppUtils.formatTime(in.getHeader(SmppConstants.SCHEDULE_DELIVERY_TIME, Date.class)));
}
if (in.getHeaders().containsKey(SmppConstants.VALIDITY_PERIOD)) {
Object validityPeriod = in.getHeader(SmppConstants.VALIDITY_PERIOD);
if (validityPeriod instanceof String) {
submitSm.setValidityPeriod((String) validityPeriod);
} else if (validityPeriod instanceof Date) {
submitSm.setValidityPeriod(SmppUtils.formatTime((Date) validityPeriod));
}
}
if (in.getHeaders().containsKey(SmppConstants.REPLACE_IF_PRESENT_FLAG)) {
submitSm.setReplaceIfPresent(in.getHeader(SmppConstants.REPLACE_IF_PRESENT_FLAG, Byte.class));
} else {
submitSm.setReplaceIfPresent(config.getReplaceIfPresentFlag());
}
submitSm.setEsmClass(new ESMClass().value());
Map<java.lang.Short, Object> optinalParamater = in.getHeader(SmppConstants.OPTIONAL_PARAMETER, Map.class);
if (optinalParamater != null) {
List<OptionalParameter> optParams = createOptionalParametersByCode(optinalParamater);
submitSm.setOptionalParameters(optParams.toArray(new OptionalParameter[0]));
} else {
Map<String, String> optinalParamaters = in.getHeader(SmppConstants.OPTIONAL_PARAMETERS, Map.class);
if (optinalParamaters != null) {
List<OptionalParameter> optParams = createOptionalParametersByName(optinalParamaters);
submitSm.setOptionalParameters(optParams.toArray(new OptionalParameter[0]));
} else {
submitSm.setOptionalParameters();
}
}
return submitSm;
}
}
| SmppSubmitSmCommand |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/security/SerializationEntity.java | {
"start": 67,
"end": 288
} | class ____ {
private String name;
public String getName() {
return name;
}
public SerializationEntity setName(String name) {
this.name = name;
return this;
}
}
| SerializationEntity |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/associations/any/AnyBidirectionalTest.java | {
"start": 1332,
"end": 2937
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.persist( new PropertyHolder( 1L, new IntegerProperty( 1L, "acc_num", 1234 ) ) );
session.persist( new PropertyHolder( 2L, new StringProperty( 2L, "acc_name", "daily" ) ) );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery( "delete from PropertyHolder" ).executeUpdate();
session.createMutationQuery( "delete from " + Property.class.getName() ).executeUpdate();
} );
}
@Test
public void testStringProperty(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final PropertyHolder propertyHolder = session.find( PropertyHolder.class, 2L );
assertThat( propertyHolder.getProperty().getName() ).isEqualTo( "acc_name" );
assertThat( propertyHolder.getProperty().getValue() ).isEqualTo( "daily" );
} );
}
@Test
public void testIntegerProperty(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final PropertyHolder propertyHolder = session.find( PropertyHolder.class, 1L );
assertThat( propertyHolder.getProperty().getName() ).isEqualTo( "acc_num" );
assertThat( propertyHolder.getProperty().getValue() ).isEqualTo( 1234 );
} );
}
@Test
public void testInverseAssociation(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final StringProperty stringProperty = session.find( StringProperty.class, 2L );
assertThat( stringProperty.getPropertyHolder().getId() ).isEqualTo( 2L );
} );
}
public | AnyBidirectionalTest |
java | elastic__elasticsearch | x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/gen/pipeline/BinaryPipe.java | {
"start": 578,
"end": 2584
} | class ____ extends Pipe {
private final Pipe left, right;
public BinaryPipe(Source source, Expression expression, Pipe left, Pipe right) {
super(source, expression, Arrays.asList(left, right));
this.left = left;
this.right = right;
}
@Override
public final Pipe replaceChildren(List<Pipe> newChildren) {
return replaceChildren(newChildren.get(0), newChildren.get(1));
}
public Pipe left() {
return left;
}
public Pipe right() {
return right;
}
@Override
public boolean supportedByAggsOnlyQuery() {
return left.supportedByAggsOnlyQuery() || right.supportedByAggsOnlyQuery();
}
@Override
public final Pipe resolveAttributes(AttributeResolver resolver) {
Pipe newLeft = left.resolveAttributes(resolver);
Pipe newRight = right.resolveAttributes(resolver);
if (newLeft == left && newRight == right) {
return this;
}
return replaceChildren(newLeft, newRight);
}
/**
* Build a copy of this object with new left and right children. Used by
* {@link #resolveAttributes(AttributeResolver)}.
*/
protected abstract BinaryPipe replaceChildren(Pipe left, Pipe right);
@Override
public boolean resolved() {
return left().resolved() && right().resolved();
}
@Override
public final void collectFields(QlSourceBuilder sourceBuilder) {
left.collectFields(sourceBuilder);
right.collectFields(sourceBuilder);
}
@Override
public int hashCode() {
return Objects.hash(left(), right());
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
BinaryPipe other = (BinaryPipe) obj;
return Objects.equals(left(), other.left()) && Objects.equals(right(), other.right());
}
}
| BinaryPipe |
java | spring-projects__spring-boot | buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/docker/type/Image.java | {
"start": 1283,
"end": 4807
} | class ____ extends MappedObject {
private final List<String> digests;
private final ImageConfig config;
private final List<LayerId> layers;
private final @Nullable String os;
private final @Nullable String architecture;
private final @Nullable String variant;
private final @Nullable String created;
private final @Nullable Descriptor descriptor;
Image(JsonNode node) {
super(node, MethodHandles.lookup());
this.digests = childrenAt("/RepoDigests", JsonNode::asString);
this.config = new ImageConfig(getNode().at("/Config"));
this.layers = extractLayers(valueAt("/RootFS/Layers", String[].class));
this.os = valueAt("/Os", String.class);
this.architecture = valueAt("/Architecture", String.class);
this.variant = valueAt("/Variant", String.class);
this.created = valueAt("/Created", String.class);
JsonNode descriptorNode = getNode().path("Descriptor");
this.descriptor = (descriptorNode.isMissingNode() || descriptorNode.isNull()) ? null
: new Descriptor(descriptorNode);
}
private List<LayerId> extractLayers(String @Nullable [] layers) {
if (layers == null) {
return Collections.emptyList();
}
return Arrays.stream(layers).map(LayerId::of).toList();
}
/**
* Return the digests of the image.
* @return the image digests
*/
public List<String> getDigests() {
return this.digests;
}
/**
* Return image config information.
* @return the image config
*/
public ImageConfig getConfig() {
return this.config;
}
/**
* Return the layer IDs contained in the image.
* @return the layer IDs.
*/
public List<LayerId> getLayers() {
return this.layers;
}
/**
* Return the OS of the image.
* @return the image OS
*/
public String getOs() {
return (StringUtils.hasText(this.os)) ? this.os : "linux";
}
/**
* Return the architecture of the image.
* @return the image architecture
*/
public @Nullable String getArchitecture() {
return this.architecture;
}
/**
* Return the variant of the image.
* @return the image variant
*/
public @Nullable String getVariant() {
return this.variant;
}
/**
* Return the created date of the image.
* @return the image created date
*/
public @Nullable String getCreated() {
return this.created;
}
/**
* Return the descriptor for this image as reported by Docker Engine inspect.
* @return the image descriptor or {@code null}
*/
public @Nullable Descriptor getDescriptor() {
return this.descriptor;
}
/**
* Return the primary digest of the image or {@code null}. Checks the
* {@code Descriptor.digest} first, falling back to {@code RepoDigest}.
* @return the primary digest or {@code null}
* @since 3.4.12
*/
public @Nullable String getPrimaryDigest() {
if (this.descriptor != null && StringUtils.hasText(this.descriptor.getDigest())) {
return this.descriptor.getDigest();
}
if (!CollectionUtils.isEmpty(this.digests)) {
try {
String digest = this.digests.get(0);
return (digest != null) ? ImageReference.of(digest).getDigest() : null;
}
catch (RuntimeException ex) {
}
}
return null;
}
/**
* Create a new {@link Image} instance from the specified JSON content.
* @param content the JSON content
* @return a new {@link Image} instance
* @throws IOException on IO error
*/
public static Image of(InputStream content) throws IOException {
return of(content, Image::new);
}
/**
* Descriptor details as reported in the {@code Docker inspect} response.
*
* @since 3.4.12
*/
public final | Image |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/CatalogViewImpl.java | {
"start": 1352,
"end": 1424
} | class ____ of the common interface.
*/
@Deprecated
@Internal
public | instead |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/connector/file/table/FileSystemTableSourceStreamingITCase.java | {
"start": 1581,
"end": 6275
} | class ____ extends StreamingTestBase {
@Test
void testMonitorContinuously() throws Exception {
// Create temp dir
File testPath = TempDirUtils.newFolder(tempFolder());
// Write first csv file out
Files.write(
Paths.get(testPath.getPath(), "input_0.csv"),
Arrays.asList("1", "2", "3"),
StandardOpenOption.CREATE);
Duration monitorInterval = Duration.ofSeconds(1);
tEnv().createTable(
"my_streaming_table",
TableDescriptor.forConnector("filesystem")
.schema(Schema.newBuilder().column("data", DataTypes.INT()).build())
.format("testcsv")
.option(FileSystemConnectorOptions.PATH, testPath.getPath())
.option(
FileSystemConnectorOptions.SOURCE_MONITOR_INTERVAL,
monitorInterval)
.build());
List<Integer> actual = new ArrayList<>();
try (CloseableIterator<Row> resultsIterator =
tEnv().sqlQuery("SELECT * FROM my_streaming_table").execute().collect()) {
// Iterate over the first 3 rows
for (int i = 0; i < 3; i++) {
actual.add(resultsIterator.next().<Integer>getFieldAs(0));
}
// Write second csv file out
Files.write(
Paths.get(testPath.getPath(), "input_1.csv"),
Arrays.asList("4", "5", "6"),
StandardOpenOption.CREATE);
// Iterate over the next 3 rows
for (int i = 0; i < 3; i++) {
actual.add(resultsIterator.next().<Integer>getFieldAs(0));
}
}
assertThat(actual).containsExactlyInAnyOrder(1, 2, 3, 4, 5, 6);
}
@Test
void testSourceWithRegexPattern() throws Exception {
// Create temp dir
File testPath0 = TempDirUtils.newFolder(tempFolder(), "dir0");
File testPath1 = TempDirUtils.newFolder(tempFolder(), "dir1");
// Write first csv file out
Files.write(
Paths.get(testPath0.getPath(), "input_0.csv"),
Arrays.asList("1", "2", "3"),
StandardOpenOption.CREATE);
Files.write(
Paths.get(testPath1.getPath(), "input_0.csv"),
Arrays.asList("4", "5", "6"),
StandardOpenOption.CREATE);
// Write a text file out, which should be filtered out
Files.write(
Paths.get(testPath0.getPath(), "input_0.txt"),
Arrays.asList("1", "2", "3"),
StandardOpenOption.CREATE);
Duration monitorInterval = Duration.ofSeconds(1);
tEnv().createTable(
"my_streaming_table",
TableDescriptor.forConnector("filesystem")
.schema(Schema.newBuilder().column("data", DataTypes.INT()).build())
.format("testcsv")
.option(
FileSystemConnectorOptions.PATH,
tempFolder().toFile().getPath())
.option(
FileSystemConnectorOptions.SOURCE_PATH_REGEX_PATTERN,
"/.*/input_[0-9]+.csv")
.option(
FileSystemConnectorOptions.SOURCE_MONITOR_INTERVAL,
monitorInterval)
.build());
List<Integer> actual = new ArrayList<>();
try (CloseableIterator<Row> resultsIterator =
tEnv().sqlQuery("SELECT * FROM my_streaming_table").execute().collect()) {
// Iterate over the first 6 rows
for (int i = 0; i < 6; i++) {
actual.add(resultsIterator.next().<Integer>getFieldAs(0));
}
// Write second csv file out
Files.write(
Paths.get(testPath0.getPath(), "input_1.csv"),
Arrays.asList("7", "8", "9"),
StandardOpenOption.CREATE);
// Iterate over the next 3 rows
for (int i = 0; i < 3; i++) {
actual.add(resultsIterator.next().<Integer>getFieldAs(0));
}
}
assertThat(actual).containsExactlyInAnyOrder(1, 2, 3, 4, 5, 6, 7, 8, 9);
}
}
| FileSystemTableSourceStreamingITCase |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/BasicLoggingTest.java | {
"start": 1011,
"end": 1237
} | class ____ {
@Test
void test1() {
final Logger logger = LogManager.getLogger(BasicLoggingTest.class.getName());
logger.debug("debug not set");
logger.error("Test message");
}
}
| BasicLoggingTest |
java | apache__maven | compat/maven-compat/src/test/java/org/apache/maven/project/inheritance/t01/ProjectInheritanceTest.java | {
"start": 1302,
"end": 3574
} | class ____ extends AbstractProjectInheritanceTestCase {
// ----------------------------------------------------------------------
//
// p4 inherits from p3
// p3 inherits from p2
// p2 inherits from p1
// p1 inherits from p0
// p0 inherits from super model
//
// or we can show it graphically as:
//
// p4 ---> p3 ---> p2 ---> p1 ---> p0 --> super model
//
// ----------------------------------------------------------------------
@Test
void testProjectInheritance() throws Exception {
// ----------------------------------------------------------------------
// Check p0 value for org name
// ----------------------------------------------------------------------
MavenProject p0 = getProject(projectFile("maven.t01", "p0"));
assertEquals("p0-org", p0.getOrganization().getName());
// ----------------------------------------------------------------------
// Check p1 value for org name
// ----------------------------------------------------------------------
MavenProject p1 = getProject(projectFile("maven.t01", "p1"));
assertEquals("p1-org", p1.getOrganization().getName());
// ----------------------------------------------------------------------
// Check p2 value for org name
// ----------------------------------------------------------------------
MavenProject p2 = getProject(projectFile("maven.t01", "p2"));
assertEquals("p2-org", p2.getOrganization().getName());
// ----------------------------------------------------------------------
// Check p2 value for org name
// ----------------------------------------------------------------------
MavenProject p3 = getProject(projectFile("maven.t01", "p3"));
assertEquals("p3-org", p3.getOrganization().getName());
// ----------------------------------------------------------------------
// Check p4 value for org name
// ----------------------------------------------------------------------
MavenProject p4 = getProject(projectFile("maven.t01", "p4"));
assertEquals("p4-org", p4.getOrganization().getName());
}
}
| ProjectInheritanceTest |
java | google__auto | value/src/main/java/com/google/auto/value/extension/AutoValueExtension.java | {
"start": 4825,
"end": 4964
} | class ____ {...}}, this will be {@code Foo}.
*/
TypeElement autoValueClass();
/**
* The fully-qualified name of the last | Foo |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/DragoonMetaInfTest.java | {
"start": 868,
"end": 1843
} | class ____ extends TestCase {
public void test_0() throws Exception {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
Enumeration<URL> e = classLoader.getResources("META-INF/dragoon-filter.properties");
while (e.hasMoreElements()) {
URL url = e.nextElement();
InputStream is = url.openStream();
Properties properties = new Properties();
try {
properties.load(is);
} finally {
is.close();
}
}
}
private static final int DEFAULT_BUFFER_SIZE = 1024 * 4;
public static long copyLarge(Reader input, Writer output) throws IOException {
char[] buffer = new char[DEFAULT_BUFFER_SIZE];
long count = 0;
int n = 0;
while (-1 != (n = input.read(buffer))) {
output.write(buffer, 0, n);
count += n;
}
return count;
}
}
| DragoonMetaInfTest |
java | grpc__grpc-java | binder/src/main/java/io/grpc/binder/internal/LeakSafeOneWayBinder.java | {
"start": 1484,
"end": 1549
} | class ____
* supports one-way calls.
*/
@Internal
public final | only |
java | apache__camel | components/camel-aws/camel-aws2-kinesis/src/test/java/org/apache/camel/component/aws2/kinesis/KinesisConsumerClosedShardWithFailTest.java | {
"start": 2481,
"end": 5589
} | class ____ {
@Mock
private KinesisClient kinesisClient;
@Mock
private AsyncProcessor processor;
private final CamelContext context = new DefaultCamelContext();
private final Kinesis2Component component = new Kinesis2Component(context);
private Kinesis2Consumer underTest;
@BeforeEach
public void setup() {
SequenceNumberRange range = SequenceNumberRange.builder().endingSequenceNumber("20").build();
Shard shard = Shard.builder().shardId("shardId").sequenceNumberRange(range).build();
ArrayList<Shard> shardList = new ArrayList<>();
shardList.add(shard);
when(kinesisClient
.getRecords(any(GetRecordsRequest.class)))
.thenReturn(GetRecordsResponse.builder().nextShardIterator(null).build());
when(kinesisClient
.getShardIterator(any(GetShardIteratorRequest.class)))
.thenReturn(GetShardIteratorResponse.builder().shardIterator("shardIterator").build());
when(kinesisClient
.listShards(any(ListShardsRequest.class)))
.thenReturn(ListShardsResponse.builder().shards(shardList).build());
component.start();
Kinesis2Configuration configuration = new Kinesis2Configuration();
configuration.setAmazonKinesisClient(kinesisClient);
configuration.setIteratorType(ShardIteratorType.LATEST);
configuration.setShardClosed(Kinesis2ShardClosedStrategyEnum.fail);
configuration.setStreamName("streamName");
Kinesis2Endpoint endpoint = new Kinesis2Endpoint("aws2-kinesis:foo", configuration, component);
endpoint.start();
underTest = new Kinesis2Consumer(endpoint, processor);
underTest.setConnection(component.getConnection());
underTest.start();
await().atMost(10, TimeUnit.SECONDS)
.until(() -> !(underTest.getCurrentShardList().isEmpty()));
}
@Test
public void itObtainsAShardIteratorOnFirstPoll() {
try {
underTest.poll();
} catch (Exception e) {
fail("The first call should not throw an exception");
}
assertThrows(IllegalStateException.class, () -> {
underTest.poll();
});
final ArgumentCaptor<GetShardIteratorRequest> getShardIteratorReqCap
= ArgumentCaptor.forClass(GetShardIteratorRequest.class);
final ArgumentCaptor<ListShardsRequest> getListShardsCap
= ArgumentCaptor.forClass(ListShardsRequest.class);
verify(kinesisClient).getShardIterator(getShardIteratorReqCap.capture());
assertThat(getShardIteratorReqCap.getValue().streamName(), is("streamName"));
assertThat(getShardIteratorReqCap.getValue().shardId(), is("shardId"));
assertThat(getShardIteratorReqCap.getValue().shardIteratorType(), is(ShardIteratorType.LATEST));
verify(kinesisClient, atLeastOnce()).listShards(getListShardsCap.capture());
assertThat(getListShardsCap.getValue().streamName(), is("streamName"));
}
}
| KinesisConsumerClosedShardWithFailTest |
java | spring-projects__spring-boot | module/spring-boot-webservices-test/src/main/java/org/springframework/boot/webservices/test/autoconfigure/server/AutoConfigureMockWebServiceClient.java | {
"start": 1124,
"end": 1388
} | class ____ enable auto-configuration of
* {@link MockWebServiceClient}.
*
* @author Daniil Razorenov
* @since 4.0.0
*/
@Target({ ElementType.TYPE, ElementType.METHOD })
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Inherited
@ImportAutoConfiguration
public @ | to |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/scheduling/annotation/ScheduledAnnotationBeanPostProcessorObservabilityTests.java | {
"start": 8747,
"end": 9014
} | class ____ extends TaskTester {
@Scheduled(fixedDelay = 10_000, initialDelay = 5_000)
Mono<Object> error() {
return Mono.error(new IllegalStateException("test error"))
.doOnTerminate(() -> this.latch.countDown());
}
}
static | FixedDelayReactiveErrorBean |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/secrets/action/TransportPostConnectorSecretActionTests.java | {
"start": 1042,
"end": 2668
} | class ____ extends ESSingleNodeTestCase {
private static final Long TIMEOUT_SECONDS = 10L;
private final ThreadPool threadPool = new TestThreadPool(getClass().getName());
private TransportPostConnectorSecretAction action;
@Before
public void setup() {
TransportService transportService = new TransportService(
Settings.EMPTY,
mock(Transport.class),
threadPool,
TransportService.NOOP_TRANSPORT_INTERCEPTOR,
x -> null,
null,
Collections.emptySet()
);
action = new TransportPostConnectorSecretAction(transportService, mock(ActionFilters.class), client());
}
@Override
public void tearDown() throws Exception {
super.tearDown();
ThreadPool.terminate(threadPool, TIMEOUT_SECONDS, TimeUnit.SECONDS);
}
public void testPostConnectorSecret_ExpectNoWarnings() throws InterruptedException {
PostConnectorSecretRequest request = ConnectorSecretsTestUtils.getRandomPostConnectorSecretRequest();
executeRequest(request);
ensureNoWarnings();
}
private void executeRequest(PostConnectorSecretRequest request) throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
action.doExecute(mock(Task.class), request, ActionListener.wrap(response -> latch.countDown(), exception -> latch.countDown()));
boolean requestTimedOut = latch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS);
assertTrue("Timeout waiting for post request", requestTimedOut);
}
}
| TransportPostConnectorSecretActionTests |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java | {
"start": 1415,
"end": 18057
} | class ____ implements EventBusInternal, MetricsProvider {
private static final AtomicReferenceFieldUpdater<EventBusImpl, Handler[]> OUTBOUND_INTERCEPTORS_UPDATER = AtomicReferenceFieldUpdater.newUpdater(EventBusImpl.class, Handler[].class, "outboundInterceptors");
private static final AtomicReferenceFieldUpdater<EventBusImpl, Handler[]> INBOUND_INTERCEPTORS_UPDATER = AtomicReferenceFieldUpdater.newUpdater(EventBusImpl.class, Handler[].class, "inboundInterceptors");
private volatile Handler<DeliveryContext<?>>[] outboundInterceptors = new Handler[0];
private volatile Handler<DeliveryContext<?>>[] inboundInterceptors = new Handler[0];
private final AtomicLong replySequence = new AtomicLong(0);
protected final VertxInternal vertx;
protected final EventBusMetrics metrics;
protected final ConcurrentMap<String, ConcurrentCyclicSequence<HandlerHolder>> handlerMap = new ConcurrentHashMap<>();
protected final CodecManager codecManager = new CodecManager();
protected volatile boolean started;
public EventBusImpl(VertxInternal vertx) {
VertxMetrics metrics = vertx.metrics();
this.vertx = vertx;
this.metrics = metrics != null ? metrics.createEventBusMetrics() : null;
}
@Override
public <T> EventBus addOutboundInterceptor(Handler<DeliveryContext<T>> interceptor) {
addInterceptor(OUTBOUND_INTERCEPTORS_UPDATER, Objects.requireNonNull(interceptor));
return this;
}
@Override
public <T> EventBus addInboundInterceptor(Handler<DeliveryContext<T>> interceptor) {
addInterceptor(INBOUND_INTERCEPTORS_UPDATER, Objects.requireNonNull(interceptor));
return this;
}
@Override
public <T> EventBus removeOutboundInterceptor(Handler<DeliveryContext<T>> interceptor) {
removeInterceptor(OUTBOUND_INTERCEPTORS_UPDATER, Objects.requireNonNull(interceptor));
return this;
}
@Override
public <T> EventBus removeInboundInterceptor(Handler<DeliveryContext<T>> interceptor) {
removeInterceptor(INBOUND_INTERCEPTORS_UPDATER, Objects.requireNonNull(interceptor));
return this;
}
Handler<DeliveryContext<?>>[] inboundInterceptors() {
return inboundInterceptors;
}
Handler<DeliveryContext<?>>[] outboundInterceptors() {
return outboundInterceptors;
}
@Override
public EventBus clusterSerializableChecker(Function<String, Boolean> classNamePredicate) {
codecManager.clusterSerializableCheck(classNamePredicate);
return this;
}
@Override
public EventBus serializableChecker(Function<String, Boolean> classNamePredicate) {
codecManager.serializableCheck(classNamePredicate);
return this;
}
@Override
public synchronized void start(Promise<Void> promise) {
if (started) {
throw new IllegalStateException("Already started");
}
started = true;
promise.complete();
}
@Override
public EventBus send(String address, Object message) {
return send(address, message, new DeliveryOptions());
}
@Override
public EventBus send(String address, Object message, DeliveryOptions options) {
MessageImpl msg = createMessage(true, isLocalOnly(options), address, options.getHeaders(), message, options.getCodecName());
sendOrPubInternal(msg, options, null);
return this;
}
@Override
public <T> Future<Message<T>> request(String address, Object message, DeliveryOptions options) {
MessageImpl msg = createMessage(true, isLocalOnly(options), address, options.getHeaders(), message, options.getCodecName());
ReplyHandler<T> handler = createReplyHandler(msg, true, options);
sendOrPubInternal(msg, options, handler);
return handler.result();
}
@Override
public <T> MessageProducer<T> sender(String address) {
Objects.requireNonNull(address, "address");
return new MessageProducerImpl<>(vertx, address, true, new DeliveryOptions());
}
@Override
public <T> MessageProducer<T> sender(String address, DeliveryOptions options) {
Objects.requireNonNull(address, "address");
Objects.requireNonNull(options, "options");
return new MessageProducerImpl<>(vertx, address, true, options);
}
@Override
public <T> MessageProducer<T> publisher(String address) {
Objects.requireNonNull(address, "address");
return new MessageProducerImpl<>(vertx, address, false, new DeliveryOptions());
}
@Override
public <T> MessageProducer<T> publisher(String address, DeliveryOptions options) {
Objects.requireNonNull(address, "address");
Objects.requireNonNull(options, "options");
return new MessageProducerImpl<>(vertx, address, false, options);
}
@Override
public EventBus publish(String address, Object message) {
return publish(address, message, new DeliveryOptions());
}
@Override
public EventBus publish(String address, Object message, DeliveryOptions options) {
sendOrPubInternal(createMessage(false, isLocalOnly(options), address, options.getHeaders(), message, options.getCodecName()), options, null);
return this;
}
@Override
public <T> MessageConsumer<T> consumer(MessageConsumerOptions options) {
checkStarted();
String address = options.getAddress();
Arguments.require(options.getAddress() != null, "Consumer address must not be null");
return new MessageConsumerImpl<>(vertx.getOrCreateContext(), this, address, options.isLocalOnly(), options.getMaxBufferedMessages());
}
@Override
public <T> MessageConsumer<T> consumer(MessageConsumerOptions options, Handler<Message<T>> handler) {
Objects.requireNonNull(handler, "handler");
MessageConsumer<T> consumer = consumer(options);
consumer.handler(handler);
return consumer;
}
@Override
public <T> MessageConsumer<T> consumer(String address) {
checkStarted();
Objects.requireNonNull(address, "address");
return new MessageConsumerImpl<>(vertx.getOrCreateContext(), this, address, false, MessageConsumerOptions.DEFAULT_MAX_BUFFERED_MESSAGES);
}
@Override
public <T> MessageConsumer<T> consumer(String address, Handler<Message<T>> handler) {
Objects.requireNonNull(handler, "handler");
MessageConsumer<T> consumer = consumer(address);
consumer.handler(handler);
return consumer;
}
@Override
public <T> MessageConsumer<T> localConsumer(String address) {
checkStarted();
Objects.requireNonNull(address, "address");
return new MessageConsumerImpl<>(vertx.getOrCreateContext(), this, address, true, MessageConsumerOptions.DEFAULT_MAX_BUFFERED_MESSAGES);
}
@Override
public <T> MessageConsumer<T> localConsumer(String address, Handler<Message<T>> handler) {
Objects.requireNonNull(handler, "handler");
MessageConsumer<T> consumer = localConsumer(address);
consumer.handler(handler);
return consumer;
}
@Override
public EventBus registerCodec(MessageCodec codec) {
codecManager.registerCodec(codec);
return this;
}
@Override
public EventBus unregisterCodec(String name) {
codecManager.unregisterCodec(name);
return this;
}
@Override
public <T> EventBus registerDefaultCodec(Class<T> clazz, MessageCodec<T, ?> codec) {
codecManager.registerDefaultCodec(clazz, codec);
return this;
}
@Override
public EventBus unregisterDefaultCodec(Class clazz) {
codecManager.unregisterDefaultCodec(clazz);
return this;
}
@Override
public EventBus codecSelector(Function<Object, String> selector) {
codecManager.codecSelector(selector);
return this;
}
@Override
public void close(Promise<Void> promise) {
if (!started) {
promise.complete();
return;
}
unregisterAll().onComplete(ar -> {
if (metrics != null) {
metrics.close();
}
promise.handle(ar);
});
}
@Override
public boolean isMetricsEnabled() {
return metrics != null;
}
@Override
public EventBusMetrics<?> getMetrics() {
return metrics;
}
public MessageImpl createMessage(boolean send, boolean localOnly, String address, MultiMap headers, Object body, String codecName) {
Objects.requireNonNull(address, "no null address accepted");
MessageCodec codec = codecManager.lookupCodec(body, codecName, localOnly);
@SuppressWarnings("unchecked")
MessageImpl msg = new MessageImpl(address, headers, body, codec, send, this);
return msg;
}
protected <T> Consumer<Promise<Void>> addRegistration(String address, HandlerRegistration<T> registration, boolean broadcast, boolean localOnly, Completable<Void> promise) {
HandlerHolder<T> holder = addLocalRegistration(address, registration, localOnly);
if (broadcast) {
onLocalRegistration(holder, promise);
} else {
promise.succeed();
}
return p -> {
removeRegistration(holder, broadcast, p);
};
}
protected <T> void onLocalRegistration(HandlerHolder<T> handlerHolder, Completable<Void> promise) {
promise.succeed();
}
private <T> HandlerHolder<T> addLocalRegistration(String address, HandlerRegistration<T> registration,
boolean localOnly) {
Objects.requireNonNull(address, "address");
ContextInternal context = registration.context;
HandlerHolder<T> holder = createHandlerHolder(registration, localOnly, context);
ConcurrentCyclicSequence<HandlerHolder> handlers = new ConcurrentCyclicSequence<HandlerHolder>().add(holder);
ConcurrentCyclicSequence<HandlerHolder> actualHandlers = handlerMap.merge(
address,
handlers,
(old, prev) -> old.add(prev.first()));
if (context.isDeployment()) {
context.addCloseHook(registration);
}
return holder;
}
protected <T> HandlerHolder<T> createHandlerHolder(HandlerRegistration<T> registration, boolean localOnly, ContextInternal context) {
return new HandlerHolder<>(registration, localOnly, context);
}
protected <T> void removeRegistration(HandlerHolder<T> handlerHolder, boolean broadcast, Promise<Void> promise) {
removeLocalRegistration(handlerHolder);
if (broadcast) {
onLocalUnregistration(handlerHolder, promise);
} else {
promise.complete();
}
}
protected <T> void onLocalUnregistration(HandlerHolder<T> handlerHolder, Completable<Void> promise) {
promise.succeed();
}
private <T> void removeLocalRegistration(HandlerHolder<T> holder) {
String address = holder.getHandler().address;
handlerMap.compute(address, (key, val) -> {
if (val == null) {
return null;
}
ConcurrentCyclicSequence<HandlerHolder> next = val.remove(holder);
return next.size() == 0 ? null : next;
});
if (holder.setRemoved() && holder.getContext().deploymentID() != null) {
holder.getContext().removeCloseHook(holder.getHandler());
}
}
protected <T> void sendReply(MessageImpl<?, T> replyMessage, DeliveryOptions options, ReplyHandler<T> replyHandler) {
if (replyMessage.address() == null) {
throw new IllegalStateException("address not specified");
} else {
sendOrPubInternal(new SendContext<>(vertx.getOrCreateContext(), replyMessage, options, replyHandler));
}
}
protected <T> void sendOrPub(ContextInternal ctx, MessageImpl<?, T> message, DeliveryOptions options, Promise<Void> writePromise) {
sendLocally(message, writePromise);
}
protected <T> void sendOrPub(SendContext<T> sendContext) {
sendOrPub(sendContext.ctx, sendContext.message, sendContext.options, sendContext);
}
protected <T> void sendLocally(MessageImpl<?, T> message, Promise<Void> writePromise) {
ReplyException failure = deliverMessageLocally(message);
if (failure != null) {
writePromise.tryFail(failure);
} else {
writePromise.tryComplete();
}
}
protected boolean isMessageLocal(MessageImpl msg) {
return true;
}
protected ReplyException deliverMessageLocally(MessageImpl msg) {
ConcurrentCyclicSequence<HandlerHolder> handlers = handlerMap.get(msg.address());
boolean messageLocal = isMessageLocal(msg);
if (handlers != null) {
if (msg.isSend()) {
//Choose one
HandlerHolder holder = nextHandler(handlers, messageLocal);
if (metrics != null) {
metrics.messageReceived(msg.address(), !msg.isSend(), messageLocal, holder != null ? 1 : 0);
}
if (holder != null) {
holder.handler.receive(msg.copyBeforeReceive());
} else {
// RACY issue !!!!!
}
} else {
// Publish
if (metrics != null) {
metrics.messageReceived(msg.address(), !msg.isSend(), messageLocal, handlers.size());
}
for (HandlerHolder holder : handlers) {
if (messageLocal || !holder.isLocalOnly()) {
holder.handler.receive(msg.copyBeforeReceive());
}
}
}
return null;
} else {
if (metrics != null) {
metrics.messageReceived(msg.address(), !msg.isSend(), messageLocal, 0);
}
return new ReplyException(ReplyFailure.NO_HANDLERS, "No handlers for address " + msg.address);
}
}
protected HandlerHolder nextHandler(ConcurrentCyclicSequence<HandlerHolder> handlers, boolean messageLocal) {
return handlers.next();
}
protected void checkStarted() {
if (!started) {
throw new IllegalStateException("Event Bus is not started");
}
}
protected String generateReplyAddress() {
return "__vertx.reply." + Long.toString(replySequence.incrementAndGet());
}
<T> ReplyHandler<T> createReplyHandler(MessageImpl message,
boolean src,
DeliveryOptions options) {
long timeout = options.getSendTimeout();
String replyAddress = generateReplyAddress();
message.setReplyAddress(replyAddress);
ReplyHandler<T> handler = new ReplyHandler<>(this, vertx.getOrCreateContext(), replyAddress, message.address, src, timeout);
handler.register();
return handler;
}
<T> SendContext<T> newSendContext(MessageImpl<?, T> message, DeliveryOptions options, ReplyHandler<T> handler) {
return new SendContext<>(vertx.getOrCreateContext(), message, options, handler);
}
public <T> void sendOrPubInternal(SendContext<T> senderCtx) {
checkStarted();
senderCtx.bus = this;
senderCtx.metrics = metrics;
senderCtx.send();
}
<T> Future<Void> sendOrPubInternal(MessageImpl<?, T> message, DeliveryOptions options,
ReplyHandler<T> handler) {
checkStarted();
SendContext<T> ctx = newSendContext(message, options, handler);
sendOrPubInternal(ctx);
Future<Void> future = ctx.writePromise.future();
if (message.send) {
return future;
}
return future.recover(throwable -> {
// For publish, we only care if there are no handlers
if (throwable instanceof ReplyException) {
return Future.failedFuture(throwable);
}
return Future.succeededFuture();
});
}
private Future<Void> unregisterAll() {
// Unregister all handlers explicitly - don't rely on context hooks
List<Future<?>> futures = new ArrayList<>();
for (ConcurrentCyclicSequence<HandlerHolder> handlers : handlerMap.values()) {
for (HandlerHolder holder : handlers) {
futures.add(holder.getHandler().unregister());
}
}
return Future.join(futures).mapEmpty();
}
private void addInterceptor(AtomicReferenceFieldUpdater<EventBusImpl, Handler[]> updater, Handler interceptor) {
while (true) {
Handler[] interceptors = updater.get(this);
Handler[] copy = Arrays.copyOf(interceptors, interceptors.length + 1);
copy[interceptors.length] = interceptor;
if (updater.compareAndSet(this, interceptors, copy)) {
break;
}
}
}
private void removeInterceptor(AtomicReferenceFieldUpdater<EventBusImpl, Handler[]> updater, Handler interceptor) {
while (true) {
Handler[] interceptors = updater.get(this);
int idx = -1;
for (int i = 0; i < interceptors.length; i++) {
if (interceptors[i].equals(interceptor)) {
idx = i;
break;
}
}
if (idx == -1) {
return;
}
Handler<DeliveryContext>[] copy = new Handler[interceptors.length - 1];
System.arraycopy(interceptors, 0, copy, 0, idx);
System.arraycopy(interceptors, idx + 1, copy, idx, copy.length - idx);
if (updater.compareAndSet(this, interceptors, copy)) {
break;
}
}
}
private boolean isLocalOnly(DeliveryOptions options) {
if (vertx.isClustered()) {
return options.isLocalOnly();
}
return true;
}
}
| EventBusImpl |
java | spring-projects__spring-security | cas/src/main/java/org/springframework/security/cas/ServiceProperties.java | {
"start": 1136,
"end": 4405
} | class ____ implements InitializingBean {
public static final String DEFAULT_CAS_ARTIFACT_PARAMETER = "ticket";
public static final String DEFAULT_CAS_SERVICE_PARAMETER = "service";
private @Nullable String service;
private boolean authenticateAllArtifacts;
private boolean sendRenew = false;
private String artifactParameter = DEFAULT_CAS_ARTIFACT_PARAMETER;
private String serviceParameter = DEFAULT_CAS_SERVICE_PARAMETER;
@Override
public void afterPropertiesSet() {
Assert.hasLength(this.service, "service cannot be empty.");
Assert.hasLength(this.artifactParameter, "artifactParameter cannot be empty.");
Assert.hasLength(this.serviceParameter, "serviceParameter cannot be empty.");
}
/**
* Represents the service the user is authenticating to.
* <p>
* This service is the callback URL belonging to the local Spring Security System for
* Spring secured application. For example,
*
* <pre>
* https://www.mycompany.com/application/login/cas
* </pre>
* @return the URL of the service the user is authenticating to
*/
public final @Nullable String getService() {
return this.service;
}
/**
* Indicates whether the <code>renew</code> parameter should be sent to the CAS login
* URL and CAS validation URL.
* <p>
* If <code>true</code>, it will force CAS to authenticate the user again (even if the
* user has previously authenticated). During ticket validation it will require the
* ticket was generated as a consequence of an explicit login. High security
* applications would probably set this to <code>true</code>. Defaults to
* <code>false</code>, providing automated single sign on.
* @return whether to send the <code>renew</code> parameter to CAS
*/
public final boolean isSendRenew() {
return this.sendRenew;
}
public final void setSendRenew(final boolean sendRenew) {
this.sendRenew = sendRenew;
}
public final void setService(final String service) {
this.service = service;
}
public final String getArtifactParameter() {
return this.artifactParameter;
}
/**
* Configures the Request Parameter to look for when attempting to see if a CAS ticket
* was sent from the server.
* @param artifactParameter the id to use. Default is "ticket".
*/
public final void setArtifactParameter(final String artifactParameter) {
this.artifactParameter = artifactParameter;
}
/**
* Configures the Request parameter to look for when attempting to send a request to
* CAS.
* @return the service parameter to use. Default is "service".
*/
public final String getServiceParameter() {
return this.serviceParameter;
}
public final void setServiceParameter(final String serviceParameter) {
this.serviceParameter = serviceParameter;
}
public final boolean isAuthenticateAllArtifacts() {
return this.authenticateAllArtifacts;
}
/**
* If true, then any non-null artifact (ticket) should be authenticated. Additionally,
* the service will be determined dynamically in order to ensure the service matches
* the expected value for this artifact.
* @param authenticateAllArtifacts
*/
public final void setAuthenticateAllArtifacts(final boolean authenticateAllArtifacts) {
this.authenticateAllArtifacts = authenticateAllArtifacts;
}
}
| ServiceProperties |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/DeadThreadTest.java | {
"start": 1130,
"end": 1433
} | class ____ {
{
// BUG: Diagnostic contains:
new Thread();
}
}
""")
.doTest();
}
@Test
public void negative() {
testHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | quarkusio__quarkus | integration-tests/gradle/src/test/java/io/quarkus/gradle/GrpcMultiModuleQuarkusBuildTest.java | {
"start": 266,
"end": 1803
} | class ____ extends QuarkusGradleWrapperTestBase {
@Test
public void testGrpcMultiModuleBuild() throws Exception {
final File projectDir = getProjectDir("grpc-multi-module-project");
runGradleWrapper(projectDir, ":application:quarkusBuild", ":application:test");
final Path commonLibs = projectDir.toPath().resolve("common").resolve("build").resolve("libs");
assertThat(commonLibs).exists();
assertThat(commonLibs.resolve("common.jar")).exists();
final Path applicationLib = projectDir.toPath().resolve("application").resolve("build").resolve("quarkus-app")
.resolve("lib").resolve("main");
assertThat(applicationLib).exists();
assertThat(applicationLib.resolve("quarkus-grpc-multi-module-build.common.jar")).exists();
}
@Test
public void testProtocErrorOutput() throws Exception {
final File projectDir = getProjectDir("grpc-multi-module-project");
final Path protoDirectory = new File(projectDir, "application/src/main/proto/").toPath();
Files.copy(projectDir.toPath().resolve("invalid.proto"), protoDirectory.resolve("invalid.proto"));
try {
final BuildResult buildResult = runGradleWrapper(true, projectDir, ":application:quarkusBuild", "--info");
assertTrue(buildResult.getOutput().contains("invalid.proto:5:1: Missing field number."));
} finally {
Files.delete(protoDirectory.resolve("invalid.proto"));
}
}
}
| GrpcMultiModuleQuarkusBuildTest |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/Mockito.java | {
"start": 76311,
"end": 82434
} | class ____ makes it
* possible to change the strictness for a single or a few tests.
* </li>
* <li>{@link MockitoSession#finishMocking(Throwable)} was added to avoid confusion that may arise because
* there are multiple competing failures. It will disable certain checks when the supplied <em>failure</em>
* is not {@code null}.
* </li>
* </ul>
*
* <h3 id="44">44. <a class="meaningful_link" href="#mockito_instantiator_provider_deprecation" name="mockito_instantiator_provider_deprecation">
* Deprecated <code>org.mockito.plugins.InstantiatorProvider</code> as it was leaking internal API. it was
* replaced by <code>org.mockito.plugins.InstantiatorProvider2 (Since 2.15.4)</code></a></h3>
*
* <p>org.mockito.plugins.InstantiatorProvider returned an internal API. Hence it was deprecated and replaced
* by {@link org.mockito.plugins.InstantiatorProvider2}. org.mockito.plugins.InstantiatorProvider
* has now been removed.</p>
*
* <h3 id="45">45. <a class="meaningful_link" href="#junit5_mockito" name="junit5_mockito">New JUnit Jupiter (JUnit5+) extension</a></h3>
*
* For integration with JUnit Jupiter (JUnit5+), use the `org.mockito:mockito-junit-jupiter` artifact.
* For more information about the usage of the integration, see <a href="https://javadoc.io/doc/org.mockito/mockito-junit-jupiter/latest/org.mockito.junit.jupiter/org/mockito/junit/jupiter/MockitoExtension.html">the JavaDoc of <code>MockitoExtension</code></a>.
*
* <h3 id="46">46. <a class="meaningful_link" href="#mockito_lenient" name="mockito_lenient">
* New <code>Mockito.lenient()</code> and <code>MockSettings.lenient()</code> methods (Since 2.20.0)</a></h3>
*
* Strict stubbing feature is available since early Mockito 2.
* It is very useful because it drives cleaner tests and improved productivity.
* Strict stubbing reports unnecessary stubs, detects stubbing argument mismatch and makes the tests more DRY ({@link Strictness#STRICT_STUBS}).
* This comes with a trade-off: in some cases, you may get false negatives from strict stubbing.
* To remedy those scenarios you can now configure specific stubbing to be lenient, while all the other stubbings and mocks use strict stubbing:
*
* <pre class="code"><code class="java">
* lenient().when(mock.foo()).thenReturn("ok");
* </code></pre>
*
* If you want all the stubbings on a given mock to be lenient, you can configure the mock accordingly:
*
* <pre class="code"><code class="java">
* Foo mock = Mockito.mock(Foo.class, withSettings().lenient());
* </code></pre>
*
* For more information refer to {@link Mockito#lenient()}.
* Let us know how do you find the new feature by opening a GitHub issue to discuss!
*
* <h3 id="47">47. <a class="meaningful_link" href="#clear_inline_mocks" name="clear_inline_mocks">New API for clearing mock state in inline mocking (Since 2.25.0)</a></h3>
*
* In certain specific, rare scenarios (issue <a href="https://github.com/mockito/mockito/pull/1619">#1619</a>)
* inline mocking causes memory leaks.
* There is no clean way to mitigate this problem completely.
* Hence, we introduced a new API to explicitly clear mock state (only make sense in inline mocking!).
* See example usage in {@link MockitoFramework#clearInlineMocks()}.
* If you have feedback or a better idea how to solve the problem please reach out.
*
*
* <h3 id="48">48. <a class="meaningful_link" href="#static_mocks" name="static_mocks">Mocking static methods</a> (since 3.4.0)</h3>
*
* When using the <a href="#0.2">inline mock maker</a>, it is possible to mock static method invocations within the current
* thread and a user-defined scope. This way, Mockito assures that concurrently and sequentially running tests do not interfere.
*
* To make sure a static mock remains temporary, it is recommended to define the scope within a try-with-resources construct.
* In the following example, the <code>Foo</code> type's static method would return <code>foo</code> unless mocked:
*
* <pre class="code"><code class="java">
* assertEquals("foo", Foo.method());
* try (MockedStatic<Foo> mocked = mockStatic(Foo.class)) {
* mocked.when(Foo::method).thenReturn("bar");
* assertEquals("bar", Foo.method());
* mocked.verify(Foo::method);
* }
* assertEquals("foo", Foo.method());
* </code></pre>
*
* Due to the defined scope of the static mock, it returns to its original behavior once the scope is released. To define mock
* behavior and to verify static method invocations, use the <code>MockedStatic</code> that is returned.
* <p>
*
* <h3 id="49">49. <a class="meaningful_link" href="#mocked_construction" name="mocked_construction">Mocking object construction</a> (since 3.5.0)</h3>
*
* When using the <a href="#0.2">inline mock maker</a>, it is possible to generate mocks on constructor invocations within the current
* thread and a user-defined scope. This way, Mockito assures that concurrently and sequentially running tests do not interfere.
*
* To make sure a constructor mocks remain temporary, it is recommended to define the scope within a try-with-resources construct.
* In the following example, the <code>Foo</code> type's construction would generate a mock:
*
* <pre class="code"><code class="java">
* assertEquals("foo", new Foo().method());
* try (MockedConstruction<Foo> mocked = mockConstruction(Foo.class)) {
* Foo foo = new Foo();
* when(foo.method()).thenReturn("bar");
* assertEquals("bar", foo.method());
* verify(foo).method();
* }
* assertEquals("foo", new Foo().method());
* </code></pre>
*
* Due to the defined scope of the mocked construction, object construction returns to its original behavior once the scope is
* released. To define mock behavior and to verify method invocations, use the <code>MockedConstruction</code> that is returned.
* <p>
*
* <h3 id="50">50. <a class="meaningful_link" href="#proxy_mock_maker" name="proxy_mock_maker">Avoiding code generation when only interfaces are mocked</a> (since 3.12.2)</h3>
*
* The JVM offers the {@link java.lang.reflect.Proxy} facility for creating dynamic proxies of | but |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java | {
"start": 1905,
"end": 5466
} | class ____ extends ESTestCase {
public void testSerialization() throws Exception {
Map<String, AliasFilter> indicesAndFilters = new HashMap<>();
Set<DiscoveryNode> nodes = new HashSet<>();
int numShards = randomIntBetween(1, 10);
ClusterSearchShardsGroup[] clusterSearchShardsGroups = new ClusterSearchShardsGroup[numShards];
for (int i = 0; i < numShards; i++) {
String index = randomAlphaOfLengthBetween(3, 10);
ShardId shardId = new ShardId(index, randomAlphaOfLength(12), i);
String nodeId = randomAlphaOfLength(10);
ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, nodeId, randomBoolean(), ShardRoutingState.STARTED);
clusterSearchShardsGroups[i] = new ClusterSearchShardsGroup(shardId, new ShardRouting[] { shardRouting });
DiscoveryNodeUtils.Builder node = DiscoveryNodeUtils.builder(shardRouting.currentNodeId())
.address(new TransportAddress(TransportAddress.META_ADDRESS, randomInt(0xFFFF)))
.version(randomCompatibleVersion(random(), Version.CURRENT), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current());
nodes.add(node.build());
AliasFilter aliasFilter;
if (randomBoolean()) {
aliasFilter = AliasFilter.of(RandomQueryBuilder.createQuery(random()), "alias-" + index);
} else {
aliasFilter = AliasFilter.EMPTY;
}
indicesAndFilters.put(index, aliasFilter);
}
ClusterSearchShardsResponse clusterSearchShardsResponse = new ClusterSearchShardsResponse(
clusterSearchShardsGroups,
nodes.toArray(new DiscoveryNode[nodes.size()]),
indicesAndFilters
);
SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(searchModule.getNamedWriteables());
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);
TransportVersion version = TransportVersionUtils.randomCompatibleVersion(random());
try (BytesStreamOutput out = new BytesStreamOutput()) {
out.setTransportVersion(version);
clusterSearchShardsResponse.writeTo(out);
try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry)) {
in.setTransportVersion(version);
ClusterSearchShardsResponse deserialized = new ClusterSearchShardsResponse(in);
assertArrayEquals(clusterSearchShardsResponse.getNodes(), deserialized.getNodes());
assertEquals(clusterSearchShardsResponse.getGroups().length, deserialized.getGroups().length);
for (int i = 0; i < clusterSearchShardsResponse.getGroups().length; i++) {
ClusterSearchShardsGroup clusterSearchShardsGroup = clusterSearchShardsResponse.getGroups()[i];
ClusterSearchShardsGroup deserializedGroup = deserialized.getGroups()[i];
assertEquals(clusterSearchShardsGroup.getShardId(), deserializedGroup.getShardId());
assertArrayEquals(clusterSearchShardsGroup.getShards(), deserializedGroup.getShards());
}
assertEquals(clusterSearchShardsResponse.getIndicesAndFilters(), deserialized.getIndicesAndFilters());
}
}
}
}
| ClusterSearchShardsResponseTests |
java | apache__flink | flink-core/src/main/java/org/apache/flink/util/AbstractAutoCloseableRegistry.java | {
"start": 1452,
"end": 1645
} | class ____ thread-safe.
*
* @param <C> Type of the closeable this registers
* @param <T> Type for potential meta data associated with the registering closeables
*/
@Internal
public abstract | are |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterState.java | {
"start": 1155,
"end": 2551
} | enum ____ {
/** Newly registered subcluster, before the first heartbeat. */
SC_NEW,
/** Subcluster is registered and the RM sent a heartbeat recently. */
SC_RUNNING,
/** Subcluster is unhealthy. */
SC_UNHEALTHY,
/** Subcluster is in the process of being out of service. */
SC_DECOMMISSIONING,
/** Subcluster is out of service. */
SC_DECOMMISSIONED,
/** RM has not sent a heartbeat for some configured time threshold. */
SC_LOST,
/** Subcluster has unregistered. */
SC_UNREGISTERED;
public boolean isUsable() {
return (this == SC_RUNNING || this == SC_NEW);
}
public boolean isActive() {
return this == SC_RUNNING;
}
public boolean isFinal() {
return (this == SC_UNREGISTERED || this == SC_DECOMMISSIONED
|| this == SC_LOST);
}
public static final Logger LOG =
LoggerFactory.getLogger(SubClusterState.class);
/**
* Convert a string into {@code SubClusterState}.
*
* @param state the string to convert in SubClusterState
* @return the respective {@code SubClusterState}
*/
public static SubClusterState fromString(String state) {
try {
return SubClusterState.valueOf(state);
} catch (Exception e) {
LOG.error("Invalid SubCluster State value({}) in the StateStore does not"
+ " match with the YARN Federation standard.", state);
return null;
}
}
}
| SubClusterState |
java | quarkusio__quarkus | integration-tests/smallrye-metrics/src/test/java/io/quarkus/it/metrics/MetricsITCase.java | {
"start": 117,
"end": 165
} | class ____ extends MetricsTestCase {
}
| MetricsITCase |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java | {
"start": 38790,
"end": 42506
} | class ____ to"
+ " specified comparator");
}
outWriter = new MapFile.Writer(conf, outMapFile,
MapFile.Writer.keyClass(keyClass),
MapFile.Writer.valueClass(valueClass));
}
/**
* Merge all input files to output map file.<br>
* 1. Read first key/value from all input files to keys/values array. <br>
* 2. Select the least key and corresponding value. <br>
* 3. Write the selected key and value to output file. <br>
* 4. Replace the already written key/value in keys/values arrays with the
* next key/value from the selected input <br>
* 5. Repeat step 2-4 till all keys are read. <br>
*/
private void mergePass() throws IOException {
// re-usable array
WritableComparable[] keys = new WritableComparable[inReaders.length];
Writable[] values = new Writable[inReaders.length];
// Read first key/value from all inputs
for (int i = 0; i < inReaders.length; i++) {
keys[i] = ReflectionUtils.newInstance(keyClass, null);
values[i] = ReflectionUtils.newInstance(valueClass, null);
if (!inReaders[i].next(keys[i], values[i])) {
// Handle empty files
keys[i] = null;
values[i] = null;
}
}
do {
int currentEntry = -1;
WritableComparable currentKey = null;
Writable currentValue = null;
for (int i = 0; i < keys.length; i++) {
if (keys[i] == null) {
// Skip Readers reached EOF
continue;
}
if (currentKey == null || comparator.compare(currentKey, keys[i]) > 0) {
currentEntry = i;
currentKey = keys[i];
currentValue = values[i];
}
}
if (currentKey == null) {
// Merge Complete
break;
}
// Write the selected key/value to merge stream
outWriter.append(currentKey, currentValue);
// Replace the already written key/value in keys/values arrays with the
// next key/value from the selected input
if (!inReaders[currentEntry].next(keys[currentEntry],
values[currentEntry])) {
// EOF for this file
keys[currentEntry] = null;
values[currentEntry] = null;
}
} while (true);
}
private void close() throws IOException {
for (int i = 0; i < inReaders.length; i++) {
IOUtils.closeStream(inReaders[i]);
inReaders[i] = null;
}
if (outWriter != null) {
outWriter.close();
outWriter = null;
}
}
}
public static void main(String[] args) throws Exception {
String usage = "Usage: MapFile inFile outFile";
if (args.length != 2) {
System.err.println(usage);
System.exit(-1);
}
String in = args[0];
String out = args[1];
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
MapFile.Reader reader = null;
try {
reader = new MapFile.Reader(fs, in, conf);
WritableComparable<?> key = ReflectionUtils.newInstance(
reader.getKeyClass().asSubclass(WritableComparable.class), conf);
Writable value = ReflectionUtils.newInstance(reader.getValueClass()
.asSubclass(Writable.class), conf);
try (MapFile.Writer writer = new MapFile.Writer(conf, fs, out,
reader.getKeyClass().asSubclass(WritableComparable.class),
reader.getValueClass())) {
while (reader.next(key, value)) { // copy all entries
writer.append(key, value);
}
}
} finally {
IOUtils.cleanupWithLogger(LOG, reader);
}
}
}
| compared |
java | quarkusio__quarkus | integration-tests/grpc-plain-text-mutiny/src/main/java/io/quarkus/grpc/examples/hello/HelloWorldService.java | {
"start": 395,
"end": 1546
} | class ____ extends MutinyGreeterGrpc.GreeterImplBase {
AtomicInteger counter = new AtomicInteger();
@Override
public Uni<HelloReply> sayHello(HelloRequest request) {
int count = counter.incrementAndGet();
String name = request.getName();
return Uni.createFrom().item("Hello " + name)
.map(res -> HelloReply.newBuilder().setMessage(res).setCount(count).build());
}
@Override
public Uni<HelloReply> greeting(LanguageSpec request) {
return Uni.createFrom().item(() -> {
String res = null;
switch (request.getSelectedLanguage()) {
case FRENCH:
res = "Bonjour!";
break;
case SPANISH:
res = "Hola!";
break;
case ENGLISH:
res = "Hello!";
break;
case UNRECOGNIZED:
res = "Blurp!";
break;
}
return res;
})
.map(res -> HelloReply.newBuilder().setMessage(res).build());
}
}
| HelloWorldService |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/search/MaxScoreCollector.java | {
"start": 738,
"end": 1561
} | class ____ extends SimpleCollector {
private Scorable scorer;
private float maxScore = Float.NEGATIVE_INFINITY;
private boolean hasHits = false;
@Override
public void setScorer(Scorable scorer) {
this.scorer = scorer;
}
@Override
public ScoreMode scoreMode() {
// Could be TOP_SCORES but it is always used in a MultiCollector anyway, so this saves some wrapping.
return ScoreMode.COMPLETE;
}
@Override
public void collect(int doc) throws IOException {
hasHits = true;
maxScore = Math.max(maxScore, scorer.score());
}
/**
* Get the maximum score. This returns {@link Float#NaN} if no hits were
* collected.
*/
public float getMaxScore() {
return hasHits ? maxScore : Float.NaN;
}
}
| MaxScoreCollector |
java | apache__kafka | streams/test-utils/src/main/java/org/apache/kafka/streams/TopologyTestDriver.java | {
"start": 11481,
"end": 12149
} | class ____ the generic
* {@link #getStateStore(String)} as well as store-type specific methods so that your tests can check the underlying
* state store(s) used by your topology's processors.
* In our previous example, after we supplied a single input message and checked the three output messages, our test
* could also check the key value store to verify the processor correctly added, removed, or updated internal state.
* Or, our test might have pre-populated some state <em>before</em> submitting the input message, and verified afterward
* that the processor(s) correctly updated the state.
*
* @see TestInputTopic
* @see TestOutputTopic
*/
public | provides |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit4/FailingBeforeAndAfterMethodsSpringRunnerTests.java | {
"start": 7818,
"end": 8013
} | class ____ {
@Test
public void testNothing() {
}
@AfterTransaction
public void afterTransaction() {
fail("always failing afterTransaction()");
}
}
}
| FailingAfterTransactionTestCase |
java | netty__netty | resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverOptions.java | {
"start": 2030,
"end": 2566
} | class ____ {
private int ndots = 1;
private int timeout = 5;
private int attempts = 16;
private Builder() {
}
void setNdots(int ndots) {
this.ndots = ndots;
}
void setTimeout(int timeout) {
this.timeout = timeout;
}
void setAttempts(int attempts) {
this.attempts = attempts;
}
UnixResolverOptions build() {
return new UnixResolverOptions(ndots, timeout, attempts);
}
}
}
| Builder |
java | elastic__elasticsearch | libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslDiagnosticsTests.java | {
"start": 1497,
"end": 59801
} | class ____ extends ESTestCase {
// Some constants for use in mock certificates
private static final byte[] MOCK_ENCODING_1 = { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66 };
private static final String MOCK_FINGERPRINT_1 = "1f8ac10f23c5b5bc1167bda84b833e5c057a77d2";
private static final byte[] MOCK_ENCODING_2 = { 0x62, 0x63, 0x64, 0x65, 0x66, 0x67 };
private static final String MOCK_FINGERPRINT_2 = "836d472783f4a210cfa3ab5621f757d1a2964aca";
private static final byte[] MOCK_ENCODING_3 = { 0x63, 0x64, 0x65, 0x66, 0x67, 0x68 };
private static final String MOCK_FINGERPRINT_3 = "da8e062d74919f549a9764c24ab0fcde3af3719f";
private static final byte[] MOCK_ENCODING_4 = { 0x64, 0x65, 0x66, 0x67, 0x68, 0x69 };
private static final String MOCK_FINGERPRINT_4 = "5d96965bfae50bf2be0d6259eb87a6cc9f5d0b26";
private static final String MOCK_NOW = "2022-12-30T12:34:56.789Z";
private static final String MOCK_NOT_BEFORE = "2022-12-01T00:00:00Z";
private static final String MOCK_NOT_AFTER = "2023-02-01T00:00:00Z";
private SslDiagnostics diagnostics;
@Before
public void setUpDiagnostics() {
this.diagnostics = buildSslDiagnostics(MOCK_NOW);
}
private SslDiagnostics buildSslDiagnostics(String mockTime) {
return new SslDiagnostics(Clock.fixed(Instant.parse(mockTime), ZoneOffset.UTC));
}
public void testDiagnosticMessageWhenServerProvidesAFullCertChainThatIsTrusted() throws Exception {
X509Certificate[] chain = loadCertChain("cert1/cert1.crt", "ca1/ca.crt");
final SSLSession session = session("192.168.1.1");
final Map<String, List<X509Certificate>> trustIssuers = trust("ca1/ca.crt", "ca2/ca.crt", "ca3/ca.crt");
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.1];"
+ " the server provided a certificate with subject name [CN=cert1],"
+ " fingerprint [3bebe388a66362784afd6c51a9000961a4e10050],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];"
+ " the certificate is issued by [CN=Test CA 1];"
+ " the certificate is signed by"
+ " (subject [CN=Test CA 1] fingerprint [2b7b0416391bdf86502505c23149022d2213dadc] {trusted issuer})"
+ " which is self-issued; the [CN=Test CA 1] certificate is trusted in this ssl context ([xpack.http.ssl])"
)
);
}
public void testDiagnosticMessageWhenServerProvidesAFullCertChainThatIsntTrusted() throws Exception {
X509Certificate[] chain = loadCertChain("cert1/cert1.crt", "ca1/ca.crt");
final SSLSession session = session("192.168.1.1");
final Map<String, List<X509Certificate>> trustIssuers = trust("ca2/ca.crt", "ca3/ca.crt");
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.1];"
+ " the server provided a certificate with subject name [CN=cert1],"
+ " fingerprint [3bebe388a66362784afd6c51a9000961a4e10050],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];"
+ " the certificate is issued by [CN=Test CA 1];"
+ " the certificate is signed by (subject [CN=Test CA 1] fingerprint [2b7b0416391bdf86502505c23149022d2213dadc])"
+ " which is self-issued; the [CN=Test CA 1] certificate is not trusted in this ssl context ([xpack.http.ssl])"
)
);
}
public void testDiagnosticMessageWithPartialChainAndUnknownTrustedIssuers() throws Exception {
X509Certificate[] chain = loadCertChain("cert1/cert1.crt");
final SSLSession session = session("192.168.1.1");
final Map<String, List<X509Certificate>> trustIssuers = null;
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.1];"
+ " the server provided a certificate with subject name [CN=cert1],"
+ " fingerprint [3bebe388a66362784afd6c51a9000961a4e10050],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];"
+ " the certificate is issued by [CN=Test CA 1]"
+ " but the server did not provide a copy of the issuing certificate in the certificate chain"
)
);
}
public void testDiagnosticMessageWithFullChainAndUnknownTrustedIssuers() throws Exception {
X509Certificate[] chain = loadCertChain("cert1/cert1.crt", "ca1/ca.crt");
final SSLSession session = session("192.168.1.1");
final Map<String, List<X509Certificate>> trustIssuers = null;
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.1];"
+ " the server provided a certificate with subject name [CN=cert1],"
+ " fingerprint [3bebe388a66362784afd6c51a9000961a4e10050],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];"
+ " the certificate is issued by [CN=Test CA 1];"
+ " the certificate is signed by (subject [CN=Test CA 1] fingerprint [2b7b0416391bdf86502505c23149022d2213dadc])"
+ " which is self-issued"
)
);
}
public void testDiagnosticMessageWhenServerFullCertChainIsntTrustedButMimicIssuerExists() throws Exception {
X509Certificate[] chain = loadCertChain("cert1/cert1.crt", "ca1/ca.crt");
final SSLSession session = session("192.168.1.1");
final Map<String, List<X509Certificate>> trustIssuers = trust("ca1-b/ca.crt", "ca2/ca.crt", "ca3/ca.crt");
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.1];"
+ " the server provided a certificate with subject name [CN=cert1],"
+ " fingerprint [3bebe388a66362784afd6c51a9000961a4e10050],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];"
+ " the certificate is issued by [CN=Test CA 1];"
+ " the certificate is signed by (subject [CN=Test CA 1] fingerprint [2b7b0416391bdf86502505c23149022d2213dadc])"
+ " which is self-issued; the [CN=Test CA 1] certificate is not trusted in this ssl context ([xpack.http.ssl]);"
+ " this ssl context does trust a certificate with subject [CN=Test CA 1]"
+ " but the trusted certificate has fingerprint [b095bf2526be20783e1f26dfd69c7aae910e3663]"
)
);
}
public void testDiagnosticMessageWhenServerProvidesEndCertificateOnlyAndTheCertAuthIsTrusted() throws Exception {
X509Certificate[] chain = loadCertChain("cert1/cert1.crt");
final SSLSession session = session("192.168.1.1");
final Map<String, List<X509Certificate>> trustIssuers = trust("ca1/ca.crt", "ca2/ca.crt", "ca3/ca.crt");
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.1];"
+ " the server provided a certificate with subject name [CN=cert1],"
+ " fingerprint [3bebe388a66362784afd6c51a9000961a4e10050],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];"
+ " the certificate is issued by [CN=Test CA 1]"
+ " but the server did not provide a copy of the issuing certificate in the certificate chain;"
+ " the issuing certificate with fingerprint [2b7b0416391bdf86502505c23149022d2213dadc]"
+ " is trusted in this ssl context ([xpack.http.ssl])"
)
);
}
public void testDiagnosticMessageWhenServerProvidesEndCertificateOnlyButTheCertAuthIsNotTrusted() throws Exception {
X509Certificate[] chain = loadCertChain("cert1/cert1.crt");
final SSLSession session = session("192.168.1.1");
final Map<String, List<X509Certificate>> trustIssuers = trust("ca2/ca.crt", "ca3/ca.crt");
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.1];"
+ " the server provided a certificate with subject name [CN=cert1],"
+ " fingerprint [3bebe388a66362784afd6c51a9000961a4e10050],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];"
+ " the certificate is issued by [CN=Test CA 1]"
+ " but the server did not provide a copy of the issuing certificate in the certificate chain;"
+ " this ssl context ([xpack.http.ssl]) is not configured to trust that issuer"
+ " but trusts [2] other issuers ([CN=Test CA 2, CN=Test CA 3])"
)
);
}
public void testDiagnosticMessageWhenServerTrustsManyCAs() throws Exception {
final X509Certificate[] chain = loadCertChain("cert1/cert1.crt");
final SSLSession session = session("192.168.1.2");
final Map<String, List<X509Certificate>> trustIssuers = new HashMap<>();
final X509Certificate dummyCa = loadCertificate("ca2/ca.crt");
final int numberOfCAs = randomIntBetween(30, 50);
for (int i = 0; i < numberOfCAs; i++) {
trustIssuers.put("CN=Authority-" + i + ",OU=security,DC=example,DC=net", randomList(1, 3, () -> dummyCa));
}
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.CLIENT,
session,
"xpack.security.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with client at [192.168.1.2];"
+ " the client provided a certificate with subject name [CN=cert1],"
+ " fingerprint [3bebe388a66362784afd6c51a9000961a4e10050],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate is issued by [CN=Test CA 1]"
+ " but the client did not provide a copy of the issuing certificate in the certificate chain;"
+ " this ssl context ([xpack.security.http.ssl]) is not configured to trust that issuer"
+ " but trusts ["
+ numberOfCAs
+ "] other issuers"
)
);
}
public void testDiagnosticMessageWhenServerProvidesEndCertificateOnlyWithMimicIssuer() throws Exception {
X509Certificate[] chain = loadCertChain("cert1/cert1.crt");
final SSLSession session = session("192.168.1.1");
final Map<String, List<X509Certificate>> trustIssuers = trust("ca1-b/ca.crt", "ca2/ca.crt", "ca3/ca.crt");
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.1];"
+ " the server provided a certificate with subject name [CN=cert1],"
+ " fingerprint [3bebe388a66362784afd6c51a9000961a4e10050],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];"
+ " the certificate is issued by [CN=Test CA 1]"
+ " but the server did not provide a copy of the issuing certificate in the certificate chain;"
+ " this ssl context ([xpack.http.ssl]) trusts [1] certificate with subject name [CN=Test CA 1]"
+ " and fingerprint [b095bf2526be20783e1f26dfd69c7aae910e3663] but the signatures do not match"
)
);
}
public void testDiagnosticMessageWhenServerProvidesEndCertificateWithMultipleMimicIssuers() throws Exception {
X509Certificate[] chain = loadCertChain("cert1/cert1.crt");
final SSLSession session = session("192.168.1.9");
final X509Certificate ca1b = loadCertificate("ca1-b/ca.crt");
final Map<String, List<X509Certificate>> trustIssuers = trust(ca1b, cloneCertificateAsMock(ca1b));
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.9];"
+ " the server provided a certificate with subject name [CN=cert1],"
+ " fingerprint [3bebe388a66362784afd6c51a9000961a4e10050],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];"
+ " the certificate is issued by [CN=Test CA 1]"
+ " but the server did not provide a copy of the issuing certificate in the certificate chain;"
+ " this ssl context ([xpack.http.ssl]) trusts [2] certificates with subject name [CN=Test CA 1]"
+ " and fingerprint [b095bf2526be20783e1f26dfd69c7aae910e3663], fingerprint ["
+ MOCK_FINGERPRINT_1
+ "]"
+ " but the signatures do not match"
)
);
}
public void testDiagnosticMessageWhenServerProvidePartialChainFromTrustedCA() throws Exception {
final X509Certificate rootCA = mockCertificateWithIssuer(
"CN=root-ca,DC=example,DC=com",
MOCK_ENCODING_1,
Collections.emptyList(),
null
);
final X509Certificate issuingCA = mockCertificateWithIssuer(
"CN=issuing-ca,DC=example,DC=com",
MOCK_ENCODING_2,
Collections.emptyList(),
rootCA
);
final X509Certificate localCA = mockCertificateWithIssuer(
"CN=ca,OU=windows,DC=example,DC=com",
MOCK_ENCODING_3,
Collections.emptyList(),
issuingCA
);
final X509Certificate endCert = mockCertificateWithIssuer(
"CN=elastic1,OU=windows,DC=example,DC=com",
MOCK_ENCODING_4,
Collections.emptyList(),
localCA
);
final X509Certificate[] chain = { endCert, localCA, issuingCA };
final SSLSession session = session("192.168.1.5");
final Map<String, List<X509Certificate>> trustIssuers = trust(issuingCA, rootCA);
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.security.authc.realms.ldap.ldap1.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.5];"
+ " the server provided a certificate with subject name [CN=elastic1,OU=windows,DC=example,DC=com]"
+ ", fingerprint ["
+ MOCK_FINGERPRINT_4
+ "],"
+ " keyUsage [digitalSignature, nonRepudiation] and extendedKeyUsage [serverAuth, codeSigning];"
+ " the certificate is valid between ["
+ MOCK_NOT_BEFORE
+ "] and ["
+ MOCK_NOT_AFTER
+ "]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate does not have any subject alternative names;"
+ " the certificate is issued by [CN=ca,OU=windows,DC=example,DC=com];"
+ " the certificate is"
+ " signed by (subject [CN=ca,OU=windows,DC=example,DC=com] fingerprint ["
+ MOCK_FINGERPRINT_3
+ "])"
+ " signed by (subject [CN=issuing-ca,DC=example,DC=com] fingerprint ["
+ MOCK_FINGERPRINT_2
+ "] {trusted issuer})"
+ " which is issued by [CN=root-ca,DC=example,DC=com] (but that issuer certificate was not provided in the chain);"
+ " the issuing certificate with fingerprint ["
+ MOCK_FINGERPRINT_1
+ "]"
+ " is trusted in this ssl context ([xpack.security.authc.realms.ldap.ldap1.ssl])"
)
);
}
public void testDiagnosticMessageWhenServerProvidePartialChainFromUntrustedCA() throws Exception {
final X509Certificate rootCA = mockCertificateWithIssuer(
"CN=root-ca,DC=example,DC=com",
MOCK_ENCODING_1,
Collections.emptyList(),
null
);
final X509Certificate issuingCA = mockCertificateWithIssuer(
"CN=issuing-ca,DC=example,DC=com",
MOCK_ENCODING_2,
Collections.emptyList(),
rootCA
);
final X509Certificate localCA = mockCertificateWithIssuer(
"CN=ca,OU=windows,DC=example,DC=com",
MOCK_ENCODING_3,
Collections.emptyList(),
issuingCA
);
final X509Certificate endCert = mockCertificateWithIssuer(
"CN=elastic1,OU=windows,DC=example,DC=com",
MOCK_ENCODING_4,
Collections.emptyList(),
localCA
);
final X509Certificate[] chain = { endCert, localCA, issuingCA };
final SSLSession session = session("192.168.1.6");
final Map<String, List<X509Certificate>> trustIssuers = trust(Collections.emptyList());
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.security.authc.realms.ldap.ldap1.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.6];"
+ " the server provided a certificate with subject name [CN=elastic1,OU=windows,DC=example,DC=com]"
+ ", fingerprint ["
+ MOCK_FINGERPRINT_4
+ "],"
+ " keyUsage [digitalSignature, nonRepudiation] and extendedKeyUsage [serverAuth, codeSigning];"
+ " the certificate is valid between ["
+ MOCK_NOT_BEFORE
+ "] and ["
+ MOCK_NOT_AFTER
+ "]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate does not have any subject alternative names;"
+ " the certificate is issued by [CN=ca,OU=windows,DC=example,DC=com];"
+ " the certificate is"
+ " signed by (subject [CN=ca,OU=windows,DC=example,DC=com] fingerprint ["
+ MOCK_FINGERPRINT_3
+ "])"
+ " signed by (subject [CN=issuing-ca,DC=example,DC=com] fingerprint ["
+ MOCK_FINGERPRINT_2
+ "])"
+ " which is issued by [CN=root-ca,DC=example,DC=com] (but that issuer certificate was not provided in the chain);"
+ " this ssl context ([xpack.security.authc.realms.ldap.ldap1.ssl])"
+ " is not configured to trust that issuer or any other issuer"
)
);
}
public void testDiagnosticMessageWhenServerProvidesASelfSignedCertThatIsDirectlyTrusted() throws Exception {
X509Certificate[] chain = loadCertChain("ca1/ca.crt");
final SSLSession session = session("192.168.1.1");
final Map<String, List<X509Certificate>> trustIssuers = trust("ca1/ca.crt", "ca2/ca.crt");
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.1];"
+ " the server provided a certificate with subject name [CN=Test CA 1]"
+ ", fingerprint [2b7b0416391bdf86502505c23149022d2213dadc],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:38:26Z] and [2046-05-20T07:38:26Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate does not have any subject alternative names;"
+ " the certificate is self-issued; the [CN=Test CA 1]"
+ " certificate is trusted in this ssl context ([xpack.http.ssl])"
)
);
}
public void testDiagnosticMessageWhenServerProvidesASelfSignedCertThatIsNotTrusted() throws Exception {
X509Certificate[] chain = loadCertChain("ca1/ca.crt");
final SSLSession session = session("192.168.10.10");
final Map<String, List<X509Certificate>> trustIssuers = Collections.emptyMap();
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.10.10];"
+ " the server provided a certificate with subject name [CN=Test CA 1]"
+ ", fingerprint [2b7b0416391bdf86502505c23149022d2213dadc], no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:38:26Z] and [2046-05-20T07:38:26Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate does not have any subject alternative names;"
+ " the certificate is self-issued; the [CN=Test CA 1]"
+ " certificate is not trusted in this ssl context ([xpack.http.ssl])"
)
);
}
public void testDiagnosticMessageWhenServerProvidesASelfSignedCertWithMimicName() throws Exception {
X509Certificate[] chain = loadCertChain("ca1/ca.crt");
final SSLSession session = session("192.168.1.1");
final Map<String, List<X509Certificate>> trustIssuers = trust("ca1-b/ca.crt", "ca2/ca.crt");
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.1];"
+ " the server provided a certificate with subject name [CN=Test CA 1]"
+ ", fingerprint [2b7b0416391bdf86502505c23149022d2213dadc],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:38:26Z] and [2046-05-20T07:38:26Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate does not have any subject alternative names;"
+ " the certificate is self-issued; the [CN=Test CA 1]"
+ " certificate is not trusted in this ssl context ([xpack.http.ssl]);"
+ " this ssl context does trust a certificate with subject [CN=Test CA 1]"
+ " but the trusted certificate has fingerprint [b095bf2526be20783e1f26dfd69c7aae910e3663]"
)
);
}
public void testDiagnosticMessageWithEmptyChain() throws Exception {
X509Certificate[] chain = new X509Certificate[0];
final SSLSession session = session("192.168.1.2");
final Map<String, List<X509Certificate>> trustIssuers = Collections.emptyMap();
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo("failed to establish trust with server at [192.168.1.2];" + " the server did not provide a certificate")
);
}
public void testDiagnosticMessageWhenServerProvidesAnEmailSubjAltName() throws Exception {
final String subjectName = "CN=foo,DC=example,DC=com";
final X509Certificate certificate = mockCertificateWithIssuer(
subjectName,
MOCK_ENCODING_1,
Collections.singletonList(List.of(1, "foo@example.com")),
null
);
X509Certificate[] chain = new X509Certificate[] { certificate };
final SSLSession session = session("192.168.1.3");
final Map<String, List<X509Certificate>> trustIssuers = trust(certificate);
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.monitoring.exporters.elastic-cloud.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.3];"
+ " the server provided a certificate with subject name [CN=foo,DC=example,DC=com]"
+ ", fingerprint ["
+ MOCK_FINGERPRINT_1
+ "],"
+ " keyUsage [digitalSignature, nonRepudiation] and extendedKeyUsage [serverAuth, codeSigning];"
+ " the certificate is valid between ["
+ MOCK_NOT_BEFORE
+ "] and ["
+ MOCK_NOT_AFTER
+ "]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate does not have any DNS/IP subject alternative names;"
+ " the certificate is self-issued;"
+ " the [CN=foo,DC=example,DC=com] certificate is trusted in"
+ " this ssl context ([xpack.monitoring.exporters.elastic-cloud.ssl])"
)
);
}
public void testDiagnosticMessageWhenServerCertificateHasNoKeyUsage() throws Exception {
final String subjectName = "CN=foo,DC=example,DC=com";
final X509Certificate certificate = mockCertificateWithIssuer(
subjectName,
MOCK_ENCODING_1,
Collections.singletonList(List.of(1, "foo@example.com")),
null,
null,
null
);
X509Certificate[] chain = new X509Certificate[] { certificate };
final String peerHost = "192.168.1." + randomIntBetween(1, 128);
final String cipherSuite = randomFrom(SslConfigurationLoader.DEFAULT_CIPHERS);
final String protocol = randomFrom(SslConfigurationLoader.DEFAULT_PROTOCOLS);
final SSLSession session = session(peerHost, cipherSuite, protocol);
final Map<String, List<X509Certificate>> trustIssuers = trust(certificate);
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.monitoring.exporters.elastic-cloud.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at ["
+ peerHost
+ "];"
+ " the server provided a certificate with subject name [CN=foo,DC=example,DC=com]"
+ ", fingerprint ["
+ MOCK_FINGERPRINT_1
+ "], no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between ["
+ MOCK_NOT_BEFORE
+ "] and ["
+ MOCK_NOT_AFTER
+ "]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite ["
+ cipherSuite
+ "] and protocol ["
+ protocol
+ "];"
+ " the certificate does not have any DNS/IP subject alternative names;"
+ " the certificate is self-issued;"
+ " the [CN=foo,DC=example,DC=com] certificate is trusted"
+ " in this ssl context ([xpack.monitoring.exporters.elastic-cloud.ssl])"
)
);
}
public void testDiagnosticMessageWhenServerCertificateHasKeyUsageAndNoExtendedKeyUsage() throws Exception {
final String subjectName = "CN=foo,DC=example,DC=com";
final boolean[] keyUsage = { true, false, true, true, true, false, false, false, false, false };
final X509Certificate certificate = mockCertificateWithIssuer(
subjectName,
MOCK_ENCODING_1,
Collections.singletonList(List.of(1, "foo@example.com")),
null,
keyUsage,
null
);
X509Certificate[] chain = new X509Certificate[] { certificate };
final String peerHost = "192.168.1." + randomIntBetween(1, 128);
final String cipherSuite = randomFrom(SslConfigurationLoader.DEFAULT_CIPHERS);
final String protocol = randomFrom(SslConfigurationLoader.DEFAULT_PROTOCOLS);
final SSLSession session = session(peerHost, cipherSuite, protocol);
final Map<String, List<X509Certificate>> trustIssuers = trust(certificate);
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.monitoring.exporters.elastic-cloud.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at ["
+ peerHost
+ "];"
+ " the server provided a certificate with subject name [CN=foo,DC=example,DC=com]"
+ ", fingerprint ["
+ MOCK_FINGERPRINT_1
+ "],"
+ " keyUsage [digitalSignature, keyEncipherment, dataEncipherment, keyAgreement]"
+ " and no extendedKeyUsage;"
+ " the certificate is valid between ["
+ MOCK_NOT_BEFORE
+ "] and ["
+ MOCK_NOT_AFTER
+ "]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite ["
+ cipherSuite
+ "] and protocol ["
+ protocol
+ "];"
+ " the certificate does not have any DNS/IP subject alternative names;"
+ " the certificate is self-issued;"
+ " the [CN=foo,DC=example,DC=com] certificate is trusted"
+ " in this ssl context ([xpack.monitoring.exporters.elastic-cloud.ssl])"
)
);
}
public void testDiagnosticMessageWhenServerCertificateHasKeyUsageAndExtendedKeyUsage() throws Exception {
final String subjectName = "CN=foo,DC=example,DC=com";
final boolean[] keyUsage = { false, false, false, false, false, false, false, true, false };
final X509Certificate certificate = mockCertificateWithIssuer(
subjectName,
MOCK_ENCODING_1,
Collections.singletonList(List.of(1, "foo@example.com")),
null,
keyUsage,
List.of("1.3.6.1.5.5.7.3.1", "1.3.6.1.5.5.7.3.2")
);
X509Certificate[] chain = new X509Certificate[] { certificate };
final String peerHost = "192.168.1." + randomIntBetween(1, 128);
final String cipherSuite = randomFrom(SslConfigurationLoader.DEFAULT_CIPHERS);
final String protocol = randomFrom(SslConfigurationLoader.DEFAULT_PROTOCOLS);
final SSLSession session = session(peerHost, cipherSuite, protocol);
final Map<String, List<X509Certificate>> trustIssuers = trust(certificate);
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.monitoring.exporters.elastic-cloud.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at ["
+ peerHost
+ "];"
+ " the server provided a certificate with subject name [CN=foo,DC=example,DC=com]"
+ ", fingerprint ["
+ MOCK_FINGERPRINT_1
+ "],"
+ " keyUsage [encipherOnly] and extendedKeyUsage [serverAuth, clientAuth];"
+ " the certificate is valid between ["
+ MOCK_NOT_BEFORE
+ "] and ["
+ MOCK_NOT_AFTER
+ "]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite ["
+ cipherSuite
+ "] and protocol ["
+ protocol
+ "];"
+ " the certificate does not have any DNS/IP subject alternative names;"
+ " the certificate is self-issued;"
+ " the [CN=foo,DC=example,DC=com] certificate is trusted"
+ " in this ssl context ([xpack.monitoring.exporters.elastic-cloud.ssl])"
)
);
}
public void testDiagnosticMessageWhenServerCertificateHasOversizedKeyUsageAndUnrecognisedExtendedKeyUsage() throws Exception {
final String subjectName = "CN=foo,DC=example,DC=com";
final boolean[] keyUsage = { false, false, false, false, false, true, false, false, false, /* extra --> */ true, false, true };
final X509Certificate certificate = mockCertificateWithIssuer(
subjectName,
MOCK_ENCODING_1,
Collections.singletonList(List.of(1, "foo@example.com")),
null,
keyUsage,
List.of("1.3.6.1.5.5.7.3.8", "1.3.6.1.5.5.7.3.12")
);
X509Certificate[] chain = new X509Certificate[] { certificate };
final String peerHost = "192.168.1." + randomIntBetween(1, 128);
final String cipherSuite = randomFrom(SslConfigurationLoader.DEFAULT_CIPHERS);
final String protocol = randomFrom(SslConfigurationLoader.DEFAULT_PROTOCOLS);
final SSLSession session = session(peerHost, cipherSuite, protocol);
final Map<String, List<X509Certificate>> trustIssuers = trust(certificate);
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.monitoring.exporters.elastic-cloud.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at ["
+ peerHost
+ "];"
+ " the server provided a certificate with subject name [CN=foo,DC=example,DC=com]"
+ ", fingerprint ["
+ MOCK_FINGERPRINT_1
+ "],"
+ " keyUsage [keyCertSign, #9, #11] and extendedKeyUsage [timeStamping, 1.3.6.1.5.5.7.3.12];"
+ " the certificate is valid between ["
+ MOCK_NOT_BEFORE
+ "] and ["
+ MOCK_NOT_AFTER
+ "]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite ["
+ cipherSuite
+ "] and protocol ["
+ protocol
+ "];"
+ " the certificate does not have any DNS/IP subject alternative names;"
+ " the certificate is self-issued;"
+ " the [CN=foo,DC=example,DC=com] certificate is trusted"
+ " in this ssl context ([xpack.monitoring.exporters.elastic-cloud.ssl])"
)
);
}
public void testDiagnosticMessageWhenACertificateHasAnInvalidEncoding() throws Exception {
final String subjectName = "CN=foo,DC=example,DC=com";
final X509Certificate certificate = mockCertificateWithIssuer(subjectName, new byte[0], Collections.emptyList(), null);
Mockito.when(certificate.getEncoded()).thenThrow(new CertificateEncodingException("MOCK INVALID ENCODING"));
X509Certificate[] chain = new X509Certificate[] { certificate };
final SSLSession session = session("192.168.1.6");
final Map<String, List<X509Certificate>> trustIssuers = trust(Collections.emptyList());
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.security.transport.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.6];"
+ " the server provided a certificate with subject name [CN=foo,DC=example,DC=com],"
+ " invalid encoding [java.security.cert.CertificateEncodingException: MOCK INVALID ENCODING],"
+ " keyUsage [digitalSignature, nonRepudiation] and extendedKeyUsage [serverAuth, codeSigning];"
+ (" the certificate is valid between [" + MOCK_NOT_BEFORE + "] and [" + MOCK_NOT_AFTER + "]")
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate does not have any subject alternative names;"
+ " the certificate is self-issued;"
+ " the [CN=foo,DC=example,DC=com] certificate is not trusted"
+ " in this ssl context ([xpack.security.transport.ssl])"
)
);
}
public void testDiagnosticMessageWhenCurrentTimeIsBeforeCertificateNotBefore() throws Exception {
final String mockTime = "2000-01-23T01:23:45.678Z";
this.diagnostics = buildSslDiagnostics(mockTime);
X509Certificate[] chain = loadCertChain("cert1/cert1.crt");
final SSLSession session = session("192.168.1.1");
final Map<String, List<X509Certificate>> trustIssuers = trust("ca1/ca.crt");
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
containsString(
" the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ mockTime
+ "], ** certificate is not yet valid);"
)
);
}
public void testDiagnosticMessageWhenCurrentTimeIsAfterCertificateNotAfter() throws Exception {
final String mockTime = "2050-05-05T05:05:05Z";
this.diagnostics = buildSslDiagnostics(mockTime);
X509Certificate[] chain = loadCertChain("cert1/cert1.crt");
final SSLSession session = session("192.168.1.1");
final Map<String, List<X509Certificate>> trustIssuers = trust("ca1/ca.crt");
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.http.ssl",
trustIssuers
);
assertThat(
message,
containsString(
" the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ mockTime
+ "], ** certificate has expired);"
)
);
}
public void testDiagnosticMessageForClientCertificate() throws Exception {
X509Certificate[] chain = loadCertChain("cert1/cert1.crt");
final SSLSession session = session("192.168.1.7");
final Map<String, List<X509Certificate>> trustIssuers = trust("ca1/ca.crt");
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.CLIENT,
session,
"xpack.security.transport.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with client at [192.168.1.7];"
+ " the client provided a certificate with subject name [CN=cert1]"
+ ", fingerprint [3bebe388a66362784afd6c51a9000961a4e10050], no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate is issued by [CN=Test CA 1]"
+ " but the client did not provide a copy of the issuing certificate in the certificate chain;"
+ " the issuing certificate with fingerprint [2b7b0416391bdf86502505c23149022d2213dadc]"
+ " is trusted in this ssl context ([xpack.security.transport.ssl])"
)
);
}
public void testDiagnosticMessageWhenCaHasNewIssuingCertificate() throws Exception {
// From time to time, CAs issue updated certificates based on the same underlying key-pair.
// For example, they might move to new signature algorithms (dropping SHA-1), or the certificate might be
// expiring and need to be reissued with a new expiry date.
// In this test, we assume that the server provides a certificate that is signed by the new CA cert, and we trust the old CA cert
// Our diagnostic message should make clear that we trust the CA, but using a different cert fingerprint.
// Note: This would normally succeed, so we wouldn't have an exception to diagnose, but it's possible that the hostname is wrong.
final X509Certificate newCaCert = loadCertificate("ca1/ca.crt");
final X509Certificate oldCaCert = cloneCertificateAsMock(newCaCert);
X509Certificate[] chain = loadCertChain("cert1/cert1.crt", "ca1/ca.crt"); // uses "new" CA
final SSLSession session = session("192.168.1.4");
final Map<String, List<X509Certificate>> trustIssuers = trust(oldCaCert);
final String message = diagnostics.getTrustDiagnosticFailure(
chain,
SslDiagnostics.PeerType.SERVER,
session,
"xpack.security.authc.realms.saml.saml1.ssl",
trustIssuers
);
assertThat(
message,
Matchers.equalTo(
"failed to establish trust with server at [192.168.1.4];"
+ " the server provided a certificate with subject name [CN=cert1],"
+ " fingerprint [3bebe388a66362784afd6c51a9000961a4e10050],"
+ " no keyUsage and no extendedKeyUsage;"
+ " the certificate is valid between [2019-01-03T07:40:42Z] and [2046-05-20T07:40:42Z]"
+ " (current time is ["
+ MOCK_NOW
+ "], certificate dates are valid);"
+ " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];"
+ " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];"
+ " the certificate is issued by [CN=Test CA 1];"
+ " the certificate is signed by (subject [CN=Test CA 1]"
+ " fingerprint [2b7b0416391bdf86502505c23149022d2213dadc] {trusted issuer})"
+ " which is self-issued;"
+ " the [CN=Test CA 1] certificate is trusted in this ssl context ([xpack.security.authc.realms.saml.saml1.ssl])"
+ " because we trust a certificate with fingerprint [1f8ac10f23c5b5bc1167bda84b833e5c057a77d2]"
+ " for the same public key"
)
);
}
public X509Certificate cloneCertificateAsMock(X509Certificate clone) throws CertificateParsingException, CertificateEncodingException {
final X509Certificate cert = Mockito.mock(X509Certificate.class);
final X500Principal principal = clone.getSubjectX500Principal();
Mockito.when(cert.getSubjectX500Principal()).thenReturn(principal);
Mockito.when(cert.getSubjectAlternativeNames()).thenReturn(clone.getSubjectAlternativeNames());
Mockito.when(cert.getIssuerX500Principal()).thenReturn(clone.getIssuerX500Principal());
Mockito.when(cert.getPublicKey()).thenReturn(clone.getPublicKey());
Mockito.when(cert.getEncoded()).thenReturn(new byte[] { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66 });
return cert;
}
public X509Certificate mockCertificateWithIssuer(
String principal,
byte[] encoding,
List<List<?>> subjAltNames,
@Nullable X509Certificate issuer
) throws CertificateException {
final List<String> extendedKeyUsage = List.of("1.3.6.1.5.5.7.3.1", "1.3.6.1.5.5.7.3.3");
final boolean[] keyUsage = { true, true, false, false, false, false, false, false, false };
return mockCertificateWithIssuer(principal, encoding, subjAltNames, issuer, keyUsage, extendedKeyUsage);
}
private X509Certificate mockCertificateWithIssuer(
String principal,
byte[] encoding,
List<List<?>> subjAltNames,
X509Certificate issuer,
boolean[] keyUsage,
List<String> extendedKeyUsage
) throws CertificateParsingException, CertificateEncodingException {
final X509Certificate cert = Mockito.mock(X509Certificate.class);
final X500Principal x500Principal = new X500Principal(principal);
final PublicKey key = Mockito.mock(PublicKey.class);
final Date notBefore = new Date(Instant.parse(MOCK_NOT_BEFORE).toEpochMilli());
final Date notAfter = new Date(Instant.parse(MOCK_NOT_AFTER).toEpochMilli());
Mockito.when(cert.getSubjectX500Principal()).thenReturn(x500Principal);
Mockito.when(cert.getSubjectAlternativeNames()).thenReturn(subjAltNames);
final X500Principal issuerPrincipal = issuer == null ? x500Principal : issuer.getSubjectX500Principal();
Mockito.when(cert.getIssuerX500Principal()).thenReturn(issuerPrincipal);
Mockito.when(cert.getPublicKey()).thenReturn(key);
Mockito.when(cert.getEncoded()).thenReturn(encoding);
Mockito.when(cert.getExtendedKeyUsage()).thenReturn(extendedKeyUsage);
Mockito.when(cert.getKeyUsage()).thenReturn(keyUsage);
Mockito.when(cert.getNotBefore()).thenReturn(notBefore);
Mockito.when(cert.getNotAfter()).thenReturn(notAfter);
return cert;
}
private X509Certificate[] loadCertChain(String... names) throws CertificateException, IOException {
final List<Path> paths = Stream.of(names).map(p -> "/certs/" + p).map(this::getDataPath).collect(Collectors.toList());
return PemUtils.readCertificates(paths).stream().map(X509Certificate.class::cast).toArray(X509Certificate[]::new);
}
private X509Certificate loadCertificate(String name) throws CertificateException, IOException {
final Path path = getDataPath("/certs/" + name);
final List<Certificate> certificates = PemUtils.readCertificates(Collections.singleton(path));
if (certificates.size() == 1) {
return (X509Certificate) certificates.get(0);
} else {
throw new IllegalStateException(
"Expected 1 certificate in [" + path.toAbsolutePath() + "] but found [" + certificates.size() + "] - " + certificates
);
}
}
private Map<String, List<X509Certificate>> trust(String... certNames) throws CertificateException, IOException {
final List<Path> paths = Stream.of(certNames).map(p -> "/certs/" + p).map(this::getDataPath).collect(Collectors.toList());
return trust(PemUtils.readCertificates(paths));
}
private Map<String, List<X509Certificate>> trust(X509Certificate... caCerts) {
return trust(Arrays.asList(caCerts));
}
private Map<String, List<X509Certificate>> trust(Collection<? extends Certificate> caCerts) {
return caCerts.stream()
.map(X509Certificate.class::cast)
.collect(
Collectors.toMap(
x -> x.getSubjectX500Principal().getName(),
List::of,
(List<X509Certificate> a, List<X509Certificate> b) -> {
List<X509Certificate> merge = new ArrayList<>();
merge.addAll(a);
merge.addAll(b);
return merge;
}
)
);
}
private SSLSession session(String peerHost) {
return session(peerHost, "TLS_ECDHE_RSA_WITH_RC4_128_SHA", "SSLv3");
}
private SSLSession session(String peerHost, String cipherSuite, String protocol) {
final SSLSession mock = Mockito.mock(SSLSession.class);
Mockito.when(mock.getPeerHost()).thenReturn(peerHost);
Mockito.when(mock.getCipherSuite()).thenReturn(cipherSuite);
Mockito.when(mock.getProtocol()).thenReturn(protocol);
return mock;
}
}
| SslDiagnosticsTests |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/LeaderElectionEvent.java | {
"start": 2257,
"end": 2958
} | class ____ extends LeaderElectionEvent {
private final String componentId;
private final LeaderInformation leaderInformation;
LeaderInformationChangeEvent(String componentId, LeaderInformation leaderInformation) {
this.componentId = componentId;
this.leaderInformation = leaderInformation;
}
public LeaderInformation getLeaderInformation() {
return leaderInformation;
}
public String getComponentId() {
return componentId;
}
@Override
public boolean isLeaderInformationChangeEvent() {
return true;
}
}
public static | LeaderInformationChangeEvent |
java | elastic__elasticsearch | qa/vector/src/main/java/org/elasticsearch/test/knn/KnnIndexTester.java | {
"start": 3777,
"end": 3863
} | enum ____ {
HNSW,
FLAT,
IVF,
GPU_HNSW
}
| IndexType |
java | apache__hadoop | hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/skylinestore/exceptions/EmptyResourceSkylineException.java | {
"start": 1002,
"end": 1229
} | class ____ extends SkylineStoreException {
private static final long serialVersionUID = -684069387367879218L;
public EmptyResourceSkylineException(final String message) {
super(message);
}
}
| EmptyResourceSkylineException |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/reflect/FieldUtils.java | {
"start": 10009,
"end": 10453
} | class ____ annotation are {@code null}.
* @since 3.4
*/
public static List<Field> getFieldsListWithAnnotation(final Class<?> cls, final Class<? extends Annotation> annotationCls) {
Objects.requireNonNull(annotationCls, "annotationCls");
return getAllFieldsList(cls).stream().filter(field -> field.getAnnotation(annotationCls) != null).collect(Collectors.toList());
}
/**
* Gets all fields of the given | or |
java | quarkusio__quarkus | extensions/hibernate-validator/deployment/src/test/java/io/quarkus/hibernate/validator/test/validatorfactory/MyEmailValidatorFactoryCustomizer.java | {
"start": 378,
"end": 886
} | class ____ implements ValidatorFactoryCustomizer {
@Override
public void customize(BaseHibernateValidatorConfiguration<?> configuration) {
ConstraintMapping constraintMapping = configuration.createConstraintMapping();
constraintMapping
.constraintDefinition(Email.class)
.includeExistingValidators(false)
.validatedBy(MyEmailValidator.class);
configuration.addMapping(constraintMapping);
}
}
| MyEmailValidatorFactoryCustomizer |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/headers/DefaultBuilderHeadersTest.java | {
"start": 1107,
"end": 1782
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest().withApplicationRoot(jar -> {
});
@TestHTTPResource
URI baseUri;
@Test
void headers() {
RestClientBuilder builder = RestClientBuilder.newBuilder().baseUri("http://localhost:8080/");
builder.register(ReturnWithAllDuplicateClientHeadersFilter.class);
builder.header("InterfaceAndBuilderHeader", "builder");
ClientBuilderHeaderMethodClient client = builder.build(ClientBuilderHeaderMethodClient.class);
checkHeaders(client.getAllHeaders("headerparam"), "method");
}
@Path("/")
public | DefaultBuilderHeadersTest |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/rest/WorkerStarting.java | {
"start": 1157,
"end": 1463
} | class ____ extends WorkerState {
@JsonCreator
public WorkerStarting(@JsonProperty("taskId") String taskId,
@JsonProperty("spec") TaskSpec spec) {
super(taskId, spec);
}
@Override
public JsonNode status() {
return new TextNode("starting");
}
}
| WorkerStarting |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SchematronEndpointBuilderFactory.java | {
"start": 8792,
"end": 9499
} | class ____ or
* location in the file system.
* This option can also be loaded from an existing file, by prefixing
* with file: or classpath: followed by the location of the file.
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path path
* @return the dsl builder
*/
default SchematronEndpointBuilder schematron(String componentName, String path) {
return SchematronEndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
static SchematronEndpointBuilder endpointBuilder(String componentName, String path) {
| path |
java | spring-projects__spring-framework | spring-beans/src/testFixtures/java/org/springframework/beans/testfixture/beans/subpkg/DeepBean.java | {
"start": 872,
"end": 940
} | class ____ {
public void aMethod(String foo) {
// no-op
}
}
| DeepBean |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/TaskManager.java | {
"start": 19970,
"end": 23582
} | class ____ implements Callable<Void> {
private final String nodeName;
private final long workerId;
private final WorkerState nextState;
UpdateWorkerState(String nodeName, long workerId, WorkerState nextState) {
this.nodeName = nodeName;
this.workerId = workerId;
this.nextState = nextState;
}
@Override
public Void call() throws Exception {
try {
WorkerState prevState = workerStates.get(workerId);
if (prevState == null) {
throw new RuntimeException("Unable to find workerId " + workerId);
}
ManagedTask task = tasks.get(prevState.taskId());
if (task == null) {
throw new RuntimeException("Unable to find taskId " + prevState.taskId());
}
log.debug("Task {}: Updating worker state for {} on {} from {} to {}.",
task.id, workerId, nodeName, prevState, nextState);
workerStates.put(workerId, nextState);
if (nextState.done() && (!prevState.done())) {
handleWorkerCompletion(task, nodeName, (WorkerDone) nextState);
}
} catch (Exception e) {
log.error("Error updating worker state for {} on {}. Stopping worker.",
workerId, nodeName, e);
nodeManagers.get(nodeName).stopWorker(workerId);
}
return null;
}
}
/**
* Handle a worker being completed.
*
* @param task The task that owns the worker.
* @param nodeName The name of the node on which the worker is running.
* @param state The worker state.
*/
private void handleWorkerCompletion(ManagedTask task, String nodeName, WorkerDone state) {
if (state.error().isEmpty()) {
log.info("{}: Worker {} finished with status '{}'",
nodeName, task.id, JsonUtil.toJsonString(state.status()));
} else {
log.warn("{}: Worker {} finished with error '{}' and status '{}'",
nodeName, task.id, state.error(), JsonUtil.toJsonString(state.status()));
task.maybeSetError(state.error());
}
TreeMap<String, Long> activeWorkerIds = task.activeWorkerIds();
if (activeWorkerIds.isEmpty()) {
task.doneMs = time.milliseconds();
task.state = TaskStateType.DONE;
log.info("{}: Task {} is now complete on {} with error: {}",
nodeName, task.id, String.join(", ", task.workerIds.keySet()),
task.error.isEmpty() ? "(none)" : task.error);
} else if ((task.state == TaskStateType.RUNNING) && (!task.error.isEmpty())) {
log.info("{}: task {} stopped with error {}. Stopping worker(s): {}",
nodeName, task.id, task.error, Utils.mkString(activeWorkerIds, "{", "}", ": ", ", "));
task.state = TaskStateType.STOPPING;
for (Map.Entry<String, Long> entry : activeWorkerIds.entrySet()) {
nodeManagers.get(entry.getKey()).stopWorker(entry.getValue());
}
}
}
/**
* Get information about the tasks being managed.
*/
public TasksResponse tasks(TasksRequest request) throws ExecutionException, InterruptedException {
return executor.submit(new GetTasksResponse(request)).get();
}
/**
* Gets information about the tasks being managed. Processed by the state change thread.
*/
| UpdateWorkerState |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/collections/BidirectionalSetTest.java | {
"start": 2371,
"end": 3490
} | class ____ {
@Id
private Long id;
private String type;
@Column(name = "`number`", unique = true)
@NaturalId
private String number;
@ManyToOne
private Person person;
//Getters and setters are omitted for brevity
//end::collections-bidirectional-set-example[]
public Phone() {
}
public Phone(Long id, String type, String number) {
this.id = id;
this.type = type;
this.number = number;
}
public Long getId() {
return id;
}
public String getType() {
return type;
}
public String getNumber() {
return number;
}
public Person getPerson() {
return person;
}
public void setPerson(Person person) {
this.person = person;
}
//tag::collections-bidirectional-set-example[]
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Phone phone = (Phone) o;
return Objects.equals(number, phone.number);
}
@Override
public int hashCode() {
return Objects.hash(number);
}
}
//end::collections-bidirectional-set-example[]
}
| Phone |
java | spring-projects__spring-framework | spring-messaging/src/test/java/org/springframework/messaging/simp/config/MessageBrokerConfigurationTests.java | {
"start": 28721,
"end": 28992
} | class ____ extends ExecutorSubscribableChannel {
private final List<Message<?>> messages = new ArrayList<>();
@Override
public boolean sendInternal(Message<?> message, long timeout) {
this.messages.add(message);
return true;
}
}
private static | TestChannel |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/SessionRestEndpointFactory.java | {
"start": 1710,
"end": 2984
} | enum ____ implements RestEndpointFactory<DispatcherGateway> {
INSTANCE;
@Override
public WebMonitorEndpoint<DispatcherGateway> createRestEndpoint(
Configuration configuration,
LeaderGatewayRetriever<DispatcherGateway> dispatcherGatewayRetriever,
LeaderGatewayRetriever<ResourceManagerGateway> resourceManagerGatewayRetriever,
TransientBlobService transientBlobService,
ScheduledExecutorService executor,
MetricFetcher metricFetcher,
LeaderElection leaderElection,
FatalErrorHandler fatalErrorHandler)
throws Exception {
final RestHandlerConfiguration restHandlerConfiguration =
RestHandlerConfiguration.fromConfiguration(configuration);
return new DispatcherRestEndpoint(
dispatcherGatewayRetriever,
configuration,
restHandlerConfiguration,
resourceManagerGatewayRetriever,
transientBlobService,
executor,
metricFetcher,
leaderElection,
RestEndpointFactory.createExecutionGraphCache(restHandlerConfiguration),
fatalErrorHandler);
}
}
| SessionRestEndpointFactory |
java | apache__rocketmq | tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceBenchTest.java | {
"start": 2490,
"end": 6346
} | class ____ {
private static final Logger log = LoggerFactory.getLogger(MessageStoreUtil.TIERED_STORE_LOGGER_NAME);
private static final String TOPIC_NAME = "TopicTest";
private MessageStoreConfig storeConfig;
private IndexStoreService indexStoreService;
private final LongAdder failureCount = new LongAdder();
@Setup
public void init() throws ClassNotFoundException, NoSuchMethodException {
String storePath = Paths.get(System.getProperty("user.home"), "store_test", "index").toString();
UtilAll.deleteFile(new File(storePath));
UtilAll.deleteFile(new File("./e96d41b2_IndexService"));
storeConfig = new MessageStoreConfig();
storeConfig.setBrokerClusterName("IndexService");
storeConfig.setBrokerName("IndexServiceBroker");
storeConfig.setStorePathRootDir(storePath);
storeConfig.setTieredBackendServiceProvider("org.apache.rocketmq.tieredstore.provider.PosixFileSegment");
storeConfig.setTieredStoreIndexFileMaxHashSlotNum(500 * 1000);
storeConfig.setTieredStoreIndexFileMaxIndexNum(2000 * 1000);
MetadataStore metadataStore = new DefaultMetadataStore(storeConfig);
FlatFileFactory flatFileFactory = new FlatFileFactory(metadataStore, storeConfig);
indexStoreService = new IndexStoreService(flatFileFactory, storePath);
indexStoreService.start();
}
@TearDown()
public void shutdown() throws IOException {
indexStoreService.shutdown();
indexStoreService.destroy();
}
//@Benchmark
@Threads(2)
@BenchmarkMode(Mode.Throughput)
@OutputTimeUnit(TimeUnit.SECONDS)
@Warmup(iterations = 1, time = 1)
@Measurement(iterations = 1, time = 1)
public void doPutThroughputBenchmark() {
for (int i = 0; i < 100; i++) {
AppendResult result = indexStoreService.putKey(
TOPIC_NAME, 123, 2, Collections.singleton(String.valueOf(i)),
i * 100L, i * 100, System.currentTimeMillis());
if (AppendResult.SUCCESS.equals(result)) {
failureCount.increment();
}
}
}
@Threads(1)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.SECONDS)
@Warmup(iterations = 0)
@Measurement(iterations = 1, time = 1)
public void doGetThroughputBenchmark() throws ExecutionException, InterruptedException {
for (int j = 0; j < 10; j++) {
for (int i = 0; i < storeConfig.getTieredStoreIndexFileMaxIndexNum(); i++) {
indexStoreService.putKey(
"TopicTest", 123, j, Collections.singleton(String.valueOf(i)),
i * 100L, i * 100, System.currentTimeMillis());
}
}
int queryCount = 100 * 10000;
Stopwatch stopwatch = Stopwatch.createStarted();
for (int i = 0; i < queryCount; i++) {
List<IndexItem> indexItems = indexStoreService.queryAsync(TOPIC_NAME, String.valueOf(i),
20, 0, System.currentTimeMillis()).get();
Assert.assertEquals(10, indexItems.size());
List<IndexItem> indexItems2 = indexStoreService.queryAsync(TOPIC_NAME, String.valueOf(i),
5, 0, System.currentTimeMillis()).get();
Assert.assertEquals(5, indexItems2.size());
}
log.info("DoGetThroughputBenchmark test cost: {}ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
}
public static void main(String[] args) throws Exception {
Options opt = new OptionsBuilder()
.include(IndexStoreServiceBenchTest.class.getSimpleName())
.warmupIterations(0)
.measurementIterations(1)
.result("result.json")
.resultFormat(ResultFormatType.JSON)
.build();
new Runner(opt).run();
}
}
| IndexStoreServiceBenchTest |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/AbstractVectorIntegrationTests.java | {
"start": 11097,
"end": 13289
} | interface ____ extends JpaRepository<WithVector, Integer> {
List<WithVector> findAllByCountryAndEmbeddingWithin(String country, Vector embedding, Score distance);
@Query("""
SELECT w FROM org.springframework.data.jpa.repository.AbstractVectorIntegrationTests$WithVector w
WHERE w.country = ?1
AND cosine_distance(w.embedding, :embedding) <= :distance
ORDER BY cosine_distance(w.embedding, :embedding) asc""")
List<WithVector> findAnnotatedByCountryAndEmbeddingWithin(String country, Vector embedding, Score distance);
@Query("""
SELECT w, cosine_distance(w.embedding, :embedding) as distance FROM org.springframework.data.jpa.repository.AbstractVectorIntegrationTests$WithVector w
WHERE w.country = ?1
AND cosine_distance(w.embedding, :embedding) <= :distance
ORDER BY distance asc""")
SearchResults<WithVector> searchAnnotatedByCountryAndEmbeddingWithin(String country, Vector embedding,
Score distance);
@Query("""
SELECT w, cosine_distance(w.embedding, :embedding) as distance FROM org.springframework.data.jpa.repository.AbstractVectorIntegrationTests$WithVector w
WHERE w.country = ?1
AND cosine_distance(w.embedding, :embedding) <= :distance
ORDER BY distance asc""")
SearchResults<WithVector> searchAnnotatedByCountryAndEmbeddingWithin(String country, Vector embedding,
float distance);
SearchResults<WithVector> searchAllByCountryAndEmbeddingWithin(String country, Vector embedding,
Range<Similarity> distance);
SearchResults<WithVector> searchTop5ByCountryAndEmbeddingWithin(String country, Vector embedding, Score distance);
SearchResults<WithVector> searchTop5ByCountryAndEmbeddingWithinOrderByDistance(String country, Vector embedding,
Score distance);
SearchResults<WithDescription> searchInterfaceProjectionByCountryAndEmbeddingWithin(String country,
Vector embedding, Score distance);
SearchResults<DescriptionDto> searchDtoByCountryAndEmbeddingWithin(String country, Vector embedding,
Score distance);
<T> SearchResults<T> searchDynamicByCountryAndEmbeddingWithin(String country, Vector embedding, Score distance,
Class<T> projection);
}
}
| VectorSearchRepository |
java | quarkusio__quarkus | extensions/amazon-lambda/common-deployment/src/main/java/io/quarkus/amazon/lambda/deployment/FunctionZipProcessor.java | {
"start": 1357,
"end": 9346
} | class ____ {
private static final Logger log = Logger.getLogger(FunctionZipProcessor.class);
/**
* Function.zip is same as the runner jar plus dependencies in lib/
* plus anything in src/main/zip.jvm
*
* @param target
* @param artifactResultProducer
* @param jar
* @throws Exception
*/
@BuildStep(onlyIf = IsProduction.class, onlyIfNot = NativeBuild.class)
public void jvmZip(OutputTargetBuildItem target,
PackageConfig packageConfig,
BuildProducer<ArtifactResultBuildItem> artifactResultProducer,
JarBuildItem jar) throws Exception {
if (packageConfig.jar().type() != PackageConfig.JarConfig.JarType.LEGACY_JAR) {
throw new BuildException("Lambda deployments need to use a legacy JAR, " +
"please set 'quarkus.package.jar.type=legacy-jar' inside your application.properties",
List.of());
}
Path zipPath = target.getOutputDirectory().resolve("function.zip");
Path zipDir = findJvmZipDir(target.getOutputDirectory());
try (ZipArchiveOutputStream zip = new ZipArchiveOutputStream(zipPath.toFile())) {
try (ZipArchiveInputStream zinput = new ZipArchiveInputStream(new FileInputStream(jar.getPath().toFile()))) {
for (;;) {
ZipArchiveEntry entry = zinput.getNextZipEntry();
if (entry == null)
break;
copyZipEntry(zip, zinput, entry);
}
}
if (zipDir != null) {
try (Stream<Path> paths = Files.walk(zipDir)) {
paths.filter(Files::isRegularFile)
.forEach(path -> {
try {
int mode = Files.isExecutable(path) ? 0755 : 0644;
addZipEntry(zip, path, zipDir.relativize(path).toString().replace('\\', '/'), mode);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
});
}
}
if (!jar.isUberJar()) {
try (Stream<Path> paths = Files.walk(jar.getLibraryDir())) {
paths.filter(Files::isRegularFile)
.forEach(path -> {
try {
int mode = Files.isExecutable(path) ? 0755 : 0644;
addZipEntry(zip, path,
"lib/" + jar.getLibraryDir().relativize(path).toString().replace('\\', '/'),
mode);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
});
}
}
}
}
/**
* Native function.zip adds anything in src/main/zip.native. If src/main/zip.native/bootstrap
* exists then the native executable is renamed to "runner".
*
* @param target
* @param artifactResultProducer
* @param nativeImage
* @throws Exception
*/
@BuildStep(onlyIf = { IsProduction.class, NativeBuild.class })
public void nativeZip(OutputTargetBuildItem target,
Optional<UpxCompressedBuildItem> upxCompressed, // used to ensure that we work with the compressed native binary if compression was enabled
BuildProducer<ArtifactResultBuildItem> artifactResultProducer,
NativeImageBuildItem nativeImage,
NativeImageRunnerBuildItem nativeImageRunner) throws Exception {
Path zipDir = findNativeZipDir(target.getOutputDirectory());
Path zipPath = target.getOutputDirectory().resolve("function.zip");
Files.deleteIfExists(zipPath);
try (ZipArchiveOutputStream zip = new ZipArchiveOutputStream(zipPath.toFile())) {
String executableName = "bootstrap";
if (zipDir != null) {
File bootstrap = zipDir.resolve("bootstrap").toFile();
if (bootstrap.exists()) {
executableName = "runner";
}
try (Stream<Path> paths = Files.walk(zipDir)) {
paths.filter(Files::isRegularFile)
.forEach(path -> {
try {
if (bootstrap.equals(path.toFile())) {
addZipEntry(zip, path, "bootstrap", 0755);
} else {
int mode = Files.isExecutable(path) ? 0755 : 0644;
addZipEntry(zip, path, zipDir.relativize(path).toString().replace('\\', '/'), mode);
}
} catch (Exception ex) {
throw new RuntimeException(ex);
}
});
}
}
addZipEntry(zip, nativeImage.getPath(), executableName, 0755);
// See https://github.com/oracle/graal/issues/4921
try (DirectoryStream<Path> sharedLibs = Files.newDirectoryStream(nativeImage.getPath().getParent(),
"*.{so,dll}")) {
sharedLibs.forEach(src -> {
try {
// In this use case, we can force all libs to be non-executable.
addZipEntry(zip, src, src.getFileName().toString(), 0644);
} catch (Exception e) {
throw new RuntimeException(e);
}
});
} catch (IOException e) {
log.errorf("Could not list files in directory %s. Continuing. Error: %s", nativeImage.getPath().getParent(), e);
}
}
}
private void copyZipEntry(ZipArchiveOutputStream zip, InputStream zinput, ZipArchiveEntry from) throws Exception {
ZipArchiveEntry entry = new ZipArchiveEntry(from);
zip.putArchiveEntry(entry);
zinput.transferTo(zip);
zip.closeArchiveEntry();
}
private void addZipEntry(ZipArchiveOutputStream zip, Path path, String name, int mode) throws Exception {
ZipArchiveEntry entry = (ZipArchiveEntry) zip.createArchiveEntry(path.toFile(), name);
entry.setUnixMode(mode);
zip.putArchiveEntry(entry);
try (InputStream i = Files.newInputStream(path)) {
i.transferTo(zip);
}
zip.closeArchiveEntry();
}
private static Path findNativeZipDir(Path outputDirectory) {
Path mainSrc = findMainSourcesRoot(outputDirectory);
if (mainSrc == null)
return null;
Path zipDir = mainSrc.resolve("zip.native");
return Files.exists(zipDir) && Files.isDirectory(zipDir) ? zipDir : null;
}
private static Path findJvmZipDir(Path outputDirectory) {
Path mainSrc = findMainSourcesRoot(outputDirectory);
if (mainSrc == null)
return null;
Path zipDir = mainSrc.resolve("zip.jvm");
return Files.exists(zipDir) && Files.isDirectory(zipDir) ? zipDir : null;
}
private static Path findMainSourcesRoot(Path outputDirectory) {
Path currentPath = outputDirectory;
do {
Path toCheck = currentPath.resolve(Paths.get("src", "main"));
if (toCheck.toFile().exists()) {
return toCheck;
}
Path parent = currentPath.getParent();
if (parent != null && Files.exists(parent)) {
currentPath = parent;
} else {
return null;
}
} while (true);
}
}
| FunctionZipProcessor |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/hql/BooleanLiteralEntity.java | {
"start": 215,
"end": 954
} | class ____ {
private Long id;
private boolean yesNoBoolean;
private boolean trueFalseBoolean;
private boolean zeroOneBoolean;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public boolean isYesNoBoolean() {
return yesNoBoolean;
}
public void setYesNoBoolean(boolean yesNoBoolean) {
this.yesNoBoolean = yesNoBoolean;
}
public boolean isTrueFalseBoolean() {
return trueFalseBoolean;
}
public void setTrueFalseBoolean(boolean trueFalseBoolean) {
this.trueFalseBoolean = trueFalseBoolean;
}
public boolean isZeroOneBoolean() {
return zeroOneBoolean;
}
public void setZeroOneBoolean(boolean zeroOneBoolean) {
this.zeroOneBoolean = zeroOneBoolean;
}
}
| BooleanLiteralEntity |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/impl/future/PromiseImpl.java | {
"start": 694,
"end": 1275
} | class ____<T> extends FutureImpl<T> implements PromiseInternal<T> {
/**
* Create a promise that hasn't completed yet
*/
public PromiseImpl() {
super();
}
/**
* Create a promise that hasn't completed yet
*/
public PromiseImpl(ContextInternal context) {
super(context);
}
@Override
public Future<T> future() {
return this;
}
@Override
public void operationComplete(io.netty.util.concurrent.Future<T> future) {
if (future.isSuccess()) {
complete(future.getNow());
} else {
fail(future.cause());
}
}
}
| PromiseImpl |
java | apache__camel | components/camel-as2/camel-as2-component/src/generated/java/org/apache/camel/component/as2/internal/AS2ClientManagerApiMethod.java | {
"start": 667,
"end": 2933
} | enum ____ implements ApiMethod {
SEND(
org.apache.hc.core5.http.protocol.HttpCoreContext.class,
"send",
arg("ediMessage", Object.class),
arg("requestUri", String.class),
arg("subject", String.class),
arg("from", String.class),
arg("as2From", String.class),
arg("as2To", String.class),
arg("as2MessageStructure", org.apache.camel.component.as2.api.AS2MessageStructure.class),
arg("ediMessageContentType", String.class),
arg("ediMessageCharset", String.class),
arg("ediMessageTransferEncoding", String.class),
arg("signingAlgorithm", org.apache.camel.component.as2.api.AS2SignatureAlgorithm.class),
arg("signingCertificateChain", new java.security.cert.Certificate[0].getClass()),
arg("signingPrivateKey", java.security.PrivateKey.class),
arg("compressionAlgorithm", org.apache.camel.component.as2.api.AS2CompressionAlgorithm.class),
arg("dispositionNotificationTo", String.class),
arg("signedReceiptMicAlgorithms", String.class),
arg("encryptingAlgorithm", org.apache.camel.component.as2.api.AS2EncryptionAlgorithm.class),
arg("encryptingCertificateChain", new java.security.cert.Certificate[0].getClass()),
arg("attachedFileName", String.class),
arg("receiptDeliveryOption", String.class),
arg("userName", String.class),
arg("password", String.class),
arg("accessToken", String.class));
private final ApiMethod apiMethod;
AS2ClientManagerApiMethod(Class<?> resultType, String name, ApiMethodArg... args) {
this.apiMethod = new ApiMethodImpl(AS2ClientManager.class, resultType, name, args);
}
@Override
public String getName() { return apiMethod.getName(); }
@Override
public Class<?> getResultType() { return apiMethod.getResultType(); }
@Override
public List<String> getArgNames() { return apiMethod.getArgNames(); }
@Override
public List<String> getSetterArgNames() { return apiMethod.getSetterArgNames(); }
@Override
public List<Class<?>> getArgTypes() { return apiMethod.getArgTypes(); }
@Override
public Method getMethod() { return apiMethod.getMethod(); }
}
| AS2ClientManagerApiMethod |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java | {
"start": 1821,
"end": 7960
} | class ____ extends EsqlScalarFunction implements OptionalArgument {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Greatest", Greatest::new);
private DataType dataType;
@FunctionInfo(
returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "version" },
description = "Returns the maximum value from multiple columns. This is similar to <<esql-mv_max>>\n"
+ "except it is intended to run on multiple columns at once.",
note = "When run on `keyword` or `text` fields, this returns the last string in alphabetical order. "
+ "When run on `boolean` columns this will return `true` if any values are `true`.",
examples = @Example(file = "math", tag = "greatest")
)
public Greatest(
Source source,
@Param(
name = "first",
type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" },
description = "First of the columns to evaluate."
) Expression first,
@Param(
name = "rest",
type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" },
description = "The rest of the columns to evaluate.",
optional = true
) List<Expression> rest
) {
super(source, Stream.concat(Stream.of(first), rest.stream()).toList());
}
private Greatest(StreamInput in) throws IOException {
this(
Source.readFrom((PlanStreamInput) in),
in.readNamedWriteable(Expression.class),
in.readNamedWriteableCollectionAsList(Expression.class)
);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
source().writeTo(out);
out.writeNamedWriteable(children().get(0));
out.writeNamedWriteableCollection(children().subList(1, children().size()));
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
public DataType dataType() {
if (dataType == null) {
resolveType();
}
return dataType;
}
@Override
protected TypeResolution resolveType() {
if (childrenResolved() == false) {
return new TypeResolution("Unresolved children");
}
for (int position = 0; position < children().size(); position++) {
Expression child = children().get(position);
if (dataType == null || dataType == NULL) {
dataType = child.dataType().noText();
continue;
}
TypeResolution resolution = TypeResolutions.isType(
child,
t -> t.noText() == dataType,
sourceText(),
TypeResolutions.ParamOrdinal.fromIndex(position),
dataType.typeName()
);
if (resolution.unresolved()) {
return resolution;
}
}
return TypeResolution.TYPE_RESOLVED;
}
@Override
public Expression replaceChildren(List<Expression> newChildren) {
return new Greatest(source(), newChildren.get(0), newChildren.subList(1, newChildren.size()));
}
@Override
protected NodeInfo<? extends Expression> info() {
return NodeInfo.create(this, Greatest::new, children().get(0), children().subList(1, children().size()));
}
@Override
public boolean foldable() {
return Expressions.foldable(children());
}
@Override
public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) {
// force datatype initialization
var dataType = dataType();
ExpressionEvaluator.Factory[] factories = children().stream()
.map(e -> toEvaluator.apply(new MvMax(e.source(), e)))
.toArray(ExpressionEvaluator.Factory[]::new);
if (dataType == DataType.BOOLEAN) {
return new GreatestBooleanEvaluator.Factory(source(), factories);
}
if (dataType == DataType.DOUBLE) {
return new GreatestDoubleEvaluator.Factory(source(), factories);
}
if (dataType == DataType.INTEGER) {
return new GreatestIntEvaluator.Factory(source(), factories);
}
if (dataType == DataType.LONG || dataType == DataType.DATETIME || dataType == DataType.DATE_NANOS) {
return new GreatestLongEvaluator.Factory(source(), factories);
}
if (DataType.isString(dataType) || dataType == DataType.IP || dataType == DataType.VERSION || dataType == DataType.UNSUPPORTED) {
return new GreatestBytesRefEvaluator.Factory(source(), factories);
}
throw EsqlIllegalArgumentException.illegalDataType(dataType);
}
@Evaluator(extraName = "Boolean")
static boolean process(boolean[] values) {
for (boolean v : values) {
if (v) {
return true;
}
}
return false;
}
@Evaluator(extraName = "BytesRef")
static BytesRef process(BytesRef[] values) {
BytesRef max = values[0];
for (int i = 1; i < values.length; i++) {
max = max.compareTo(values[i]) > 0 ? max : values[i];
}
return max;
}
@Evaluator(extraName = "Int")
static int process(int[] values) {
int max = values[0];
for (int i = 1; i < values.length; i++) {
max = Math.max(max, values[i]);
}
return max;
}
@Evaluator(extraName = "Long")
static long process(long[] values) {
long max = values[0];
for (int i = 1; i < values.length; i++) {
max = Math.max(max, values[i]);
}
return max;
}
@Evaluator(extraName = "Double")
static double process(double[] values) {
double max = values[0];
for (int i = 1; i < values.length; i++) {
max = Math.max(max, values[i]);
}
return max;
}
// TODO unsigned long
}
| Greatest |
java | apache__flink | flink-datastream-api/src/main/java/org/apache/flink/datastream/api/builtin/BuiltinFuncs.java | {
"start": 1967,
"end": 2384
} | class ____ {
// =================== Join ===========================
static final Class<?> JOIN_FUNCS_INSTANCE;
static {
try {
JOIN_FUNCS_INSTANCE =
Class.forName("org.apache.flink.datastream.impl.builtin.BuiltinJoinFuncs");
} catch (ClassNotFoundException e) {
throw new RuntimeException("Please ensure that flink-datastream in your | BuiltinFuncs |
java | alibaba__fastjson | src/main/java/com/alibaba/fastjson/JSONStreamAware.java | {
"start": 820,
"end": 961
} | interface ____ {
/**
* write JSON string to out.
*/
void writeJSONString(Appendable out) throws IOException;
}
| JSONStreamAware |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/NoOutputDefinition.java | {
"start": 976,
"end": 1334
} | class ____<Type extends ProcessorDefinition<Type>> extends ProcessorDefinition<Type> {
@Override
public List<ProcessorDefinition<?>> getOutputs() {
return Collections.emptyList();
}
protected NoOutputDefinition() {
}
protected NoOutputDefinition(NoOutputDefinition<?> source) {
super(source);
}
}
| NoOutputDefinition |
java | apache__flink | flink-libraries/flink-state-processing-api/src/test/java/org/apache/flink/state/api/SavepointReaderCustomSerializerITCase.java | {
"start": 1265,
"end": 2917
} | class ____ extends SavepointReaderITTestBase {
private static final ListStateDescriptor<Integer> list =
new ListStateDescriptor<>(LIST_NAME, CustomIntSerializer.INSTANCE);
private static final ListStateDescriptor<Integer> union =
new ListStateDescriptor<>(UNION_NAME, CustomIntSerializer.INSTANCE);
private static final MapStateDescriptor<Integer, String> broadcast =
new MapStateDescriptor<>(
BROADCAST_NAME, CustomIntSerializer.INSTANCE, StringSerializer.INSTANCE);
public SavepointReaderCustomSerializerITCase() {
super(list, union, broadcast);
}
@Override
public DataStream<Integer> readListState(SavepointReader savepoint) throws IOException {
return savepoint.readListState(
OperatorIdentifier.forUid(UID), LIST_NAME, Types.INT, CustomIntSerializer.INSTANCE);
}
@Override
public DataStream<Integer> readUnionState(SavepointReader savepoint) throws IOException {
return savepoint.readUnionState(
OperatorIdentifier.forUid(UID),
UNION_NAME,
Types.INT,
CustomIntSerializer.INSTANCE);
}
@Override
public DataStream<Tuple2<Integer, String>> readBroadcastState(SavepointReader savepoint)
throws IOException {
return savepoint.readBroadcastState(
OperatorIdentifier.forUid(UID),
BROADCAST_NAME,
Types.INT,
Types.STRING,
CustomIntSerializer.INSTANCE,
StringSerializer.INSTANCE);
}
}
| SavepointReaderCustomSerializerITCase |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/AutoValueTest.java | {
"start": 46038,
"end": 46336
} | class ____ {}
}
@SuppressWarnings("TruthSelfEquals")
@Test
public void testRedeclareFloatAndDouble() {
RedeclareFloatAndDouble iEqualMyself = RedeclareFloatAndDouble.of(Float.NaN, Double.NaN);
assertThat(iEqualMyself).isEqualTo(iEqualMyself);
}
@AutoValue
abstract static | Double |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ClassCanBeStaticTest.java | {
"start": 6521,
"end": 6692
} | class ____ {
{
System.err.println(x);
}
}
// BUG: Diagnostic contains:
private | One |
java | quarkusio__quarkus | integration-tests/jpa-mapping-xml/legacy-library-b/src/main/java/io/quarkus/it/jpa/mapping/xml/legacy/library_b/LibraryBEntity.java | {
"start": 244,
"end": 692
} | class ____ {
@Id
private long id;
@Basic
private String name;
public LibraryBEntity() {
}
public LibraryBEntity(String name) {
this.name = name;
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
| LibraryBEntity |
java | quarkusio__quarkus | extensions/grpc/deployment/src/test/java/io/quarkus/grpc/client/tls/HelloWorldTlsEndpoint.java | {
"start": 411,
"end": 1099
} | class ____ {
@GrpcClient("hello")
GreeterGrpc.GreeterBlockingStub blockingHelloService;
@GrpcClient("hello")
MutinyGreeterGrpc.MutinyGreeterStub mutinyHelloService;
@GET
@Path("/blocking/{name}")
public String helloBlocking(@PathParam("name") String name) {
return blockingHelloService.sayHello(HelloRequest.newBuilder().setName(name).build()).getMessage();
}
@GET
@Path("/mutiny/{name}")
public Uni<String> helloMutiny(@PathParam("name") String name) {
return mutinyHelloService.sayHello(HelloRequest.newBuilder().setName(name).build())
.onItem().transform(HelloReply::getMessage);
}
}
| HelloWorldTlsEndpoint |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/AnnotatedElementUtilsTests.java | {
"start": 57094,
"end": 57157
} | class ____ {
}
@Transactional
static | ComposedTransactionalClass |
java | spring-projects__spring-framework | spring-tx/src/main/java/org/springframework/transaction/support/DefaultTransactionStatus.java | {
"start": 6797,
"end": 7368
} | interface ____ therefore supports savepoints.
* @see #getTransaction()
* @see #getSavepointManager()
*/
public boolean isTransactionSavepointManager() {
return (this.transaction instanceof SavepointManager);
}
/**
* Delegate the flushing to the transaction object, provided that the latter
* implements the {@link SmartTransactionObject} interface.
* @see SmartTransactionObject#flush()
*/
@Override
public void flush() {
if (this.transaction instanceof SmartTransactionObject smartTransactionObject) {
smartTransactionObject.flush();
}
}
}
| and |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/generics/TwoArgWrapper.java | {
"start": 206,
"end": 589
} | class ____<T1, T2> {
public TwoArgWrapper() {
}
public TwoArgWrapper(TwoArgHolder<T1, T2> wrapped) {
this.wrapped = wrapped;
}
private TwoArgHolder<T1, T2> wrapped;
public TwoArgHolder<T1, T2> getWrapped() {
return wrapped;
}
public void setWrapped(TwoArgHolder<T1, T2> wrapped) {
this.wrapped = wrapped;
}
}
| TwoArgWrapper |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/samples/client/standalone/resultmatches/HeaderAssertionTests.java | {
"start": 7605,
"end": 8053
} | class ____ {
private long timestamp;
public void setStubTimestamp(long timestamp) {
this.timestamp = timestamp;
}
@RequestMapping("/persons/{id}")
public ResponseEntity<Person> showEntity(@PathVariable long id, WebRequest request) {
return ResponseEntity
.ok()
.lastModified(this.timestamp)
.header("X-Rate-Limiting", "42")
.header("Vary", "foo", "bar")
.body(new Person("Jason"));
}
}
}
| PersonController |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/resteasy/async/filters/AsyncRequestFilterTest.java | {
"start": 776,
"end": 21462
} | class ____ {
protected static final Logger log = Logger.getLogger(AsyncRequestFilterTest.class.getName());
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
return ShrinkWrap.create(JavaArchive.class)
.addClasses(PortProviderUtil.class,
AsyncRequestFilter.class, AsyncRequestFilter1.class, AsyncRequestFilter2.class,
AsyncRequestFilter3.class,
AsyncPreMatchRequestFilter1.class, AsyncPreMatchRequestFilter2.class,
AsyncPreMatchRequestFilter3.class,
AsyncResponseFilter.class,
AsyncResponseFilter1.class, AsyncResponseFilter2.class, AsyncResponseFilter3.class,
AsyncFilterException.class, AsyncFilterExceptionMapper.class,
AsyncRequestFilterResource.class, ExceptionUtil.class);
}
});
/**
* @tpTestDetails Interceptors work
* @tpSince RESTEasy 4.0.0
*/
@Test
public void testRequestFilters() throws Exception {
Client client = ClientBuilder.newClient();
// Create book.
WebTarget base = client.target(PortProviderUtil.generateURL("/"));
// all sync
Response response = base.request()
.header("Filter1", "sync-pass")
.header("Filter2", "sync-pass")
.header("Filter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("Filter1", "sync-fail")
.header("Filter2", "sync-fail")
.header("Filter3", "sync-fail")
.get();
assertEquals(200, response.getStatus());
assertEquals("Filter1", response.readEntity(String.class));
response = base.request()
.header("Filter1", "sync-pass")
.header("Filter2", "sync-fail")
.header("Filter3", "sync-fail")
.get();
assertEquals(200, response.getStatus());
assertEquals("Filter2", response.readEntity(String.class));
response = base.request()
.header("Filter1", "sync-pass")
.header("Filter2", "sync-pass")
.header("Filter3", "sync-fail")
.get();
assertEquals(200, response.getStatus());
assertEquals("Filter3", response.readEntity(String.class));
// async
response = base.request()
.header("Filter1", "async-pass")
.header("Filter2", "sync-pass")
.header("Filter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("Filter1", "async-pass")
.header("Filter2", "async-pass")
.header("Filter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("Filter1", "async-pass")
.header("Filter2", "async-pass")
.header("Filter3", "async-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("Filter1", "async-pass")
.header("Filter2", "sync-pass")
.header("Filter3", "async-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("Filter1", "sync-pass")
.header("Filter2", "async-pass")
.header("Filter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
// async failures
response = base.request()
.header("Filter1", "async-fail")
.header("Filter2", "sync-fail")
.header("Filter3", "sync-fail")
.get();
assertEquals(200, response.getStatus());
assertEquals("Filter1", response.readEntity(String.class));
response = base.request()
.header("Filter1", "async-pass")
.header("Filter2", "sync-fail")
.header("Filter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("Filter2", response.readEntity(String.class));
response = base.request()
.header("Filter1", "async-pass")
.header("Filter2", "async-fail")
.header("Filter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("Filter2", response.readEntity(String.class));
// async instantaneous
response = base.request()
.header("Filter1", "async-pass-instant")
.header("Filter2", "sync-pass")
.header("Filter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("Filter1", "async-fail-instant")
.header("Filter2", "sync-pass")
.header("Filter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("Filter1", response.readEntity(String.class));
client.close();
}
/**
* @tpTestDetails Interceptors work
* @tpSince RESTEasy 4.0.0
*/
@Test
public void testPreMatchRequestFilters() throws Exception {
Client client = ClientBuilder.newClient();
// Create book.
WebTarget base = client.target(PortProviderUtil.generateURL("/"));
// all sync
Response response = base.request()
.header("PreMatchFilter1", "sync-pass")
.header("PreMatchFilter2", "sync-pass")
.header("PreMatchFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("PreMatchFilter1", "sync-fail")
.header("PreMatchFilter2", "sync-fail")
.header("PreMatchFilter3", "sync-fail")
.get();
assertEquals(200, response.getStatus());
assertEquals("PreMatchFilter1", response.readEntity(String.class));
response = base.request()
.header("PreMatchFilter1", "sync-pass")
.header("PreMatchFilter2", "sync-fail")
.header("PreMatchFilter3", "sync-fail")
.get();
assertEquals(200, response.getStatus());
assertEquals("PreMatchFilter2", response.readEntity(String.class));
response = base.request()
.header("PreMatchFilter1", "sync-pass")
.header("PreMatchFilter2", "sync-pass")
.header("PreMatchFilter3", "sync-fail")
.get();
assertEquals(200, response.getStatus());
assertEquals("PreMatchFilter3", response.readEntity(String.class));
// async
response = base.request()
.header("PreMatchFilter1", "async-pass")
.header("PreMatchFilter2", "sync-pass")
.header("PreMatchFilter3", "sync-pass")
.get();
assertEquals("resource", response.readEntity(String.class));
assertEquals(200, response.getStatus());
response = base.request()
.header("PreMatchFilter1", "async-pass")
.header("PreMatchFilter2", "async-pass")
.header("PreMatchFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("PreMatchFilter1", "async-pass")
.header("PreMatchFilter2", "async-pass")
.header("PreMatchFilter3", "async-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("PreMatchFilter1", "async-pass")
.header("PreMatchFilter2", "sync-pass")
.header("PreMatchFilter3", "async-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("PreMatchFilter1", "sync-pass")
.header("PreMatchFilter2", "async-pass")
.header("PreMatchFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
// async failures
response = base.request()
.header("PreMatchFilter1", "async-fail")
.header("PreMatchFilter2", "sync-fail")
.header("PreMatchFilter3", "sync-fail")
.get();
assertEquals(200, response.getStatus());
assertEquals("PreMatchFilter1", response.readEntity(String.class));
response = base.request()
.header("PreMatchFilter1", "async-pass")
.header("PreMatchFilter2", "sync-fail")
.header("PreMatchFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("PreMatchFilter2", response.readEntity(String.class));
response = base.request()
.header("PreMatchFilter1", "async-pass")
.header("PreMatchFilter2", "async-fail")
.header("PreMatchFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("PreMatchFilter2", response.readEntity(String.class));
client.close();
}
/**
* @tpTestDetails Interceptors work
* @tpSince RESTEasy 4.0.0
*/
@Test
public void testResponseFilters() throws Exception {
Client client = ClientBuilder.newClient();
// Create book.
WebTarget base = client.target(PortProviderUtil.generateURL("/"));
// all sync
Response response = base.request()
.header("ResponseFilter1", "sync-pass")
.header("ResponseFilter2", "sync-pass")
.header("ResponseFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("ResponseFilter1", "sync-fail")
.header("ResponseFilter2", "sync-pass")
.header("ResponseFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("ResponseFilter1", response.readEntity(String.class));
response = base.request()
.header("ResponseFilter1", "sync-pass")
.header("ResponseFilter2", "sync-fail")
.header("ResponseFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("ResponseFilter2", response.readEntity(String.class));
response = base.request()
.header("ResponseFilter1", "sync-pass")
.header("ResponseFilter2", "sync-pass")
.header("ResponseFilter3", "sync-fail")
.get();
assertEquals(200, response.getStatus());
assertEquals("ResponseFilter3", response.readEntity(String.class));
// async
response = base.request()
.header("ResponseFilter1", "async-pass")
.header("ResponseFilter2", "sync-pass")
.header("ResponseFilter3", "sync-pass")
.get();
assertEquals("resource", response.readEntity(String.class));
assertEquals(200, response.getStatus());
response = base.request()
.header("ResponseFilter1", "async-pass")
.header("ResponseFilter2", "async-pass")
.header("ResponseFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("ResponseFilter1", "async-pass")
.header("ResponseFilter2", "async-pass")
.header("ResponseFilter3", "async-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("ResponseFilter1", "async-pass")
.header("ResponseFilter2", "sync-pass")
.header("ResponseFilter3", "async-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("ResponseFilter1", "sync-pass")
.header("ResponseFilter2", "async-pass")
.header("ResponseFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
// async failures
response = base.request()
.header("ResponseFilter1", "async-fail")
.header("ResponseFilter2", "sync-pass")
.header("ResponseFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("ResponseFilter1", response.readEntity(String.class));
response = base.request()
.header("ResponseFilter1", "async-pass")
.header("ResponseFilter2", "sync-fail")
.header("ResponseFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("ResponseFilter2", response.readEntity(String.class));
response = base.request()
.header("ResponseFilter1", "async-pass")
.header("ResponseFilter2", "async-fail")
.header("ResponseFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("ResponseFilter2", response.readEntity(String.class));
// async instantaneous
response = base.request()
.header("ResponseFilter1", "async-pass-instant")
.header("ResponseFilter2", "sync-pass")
.header("ResponseFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
response = base.request()
.header("ResponseFilter1", "async-fail-instant")
.header("ResponseFilter2", "sync-pass")
.header("ResponseFilter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("ResponseFilter1", response.readEntity(String.class));
client.close();
}
/**
* @tpTestDetails Interceptors work
* @tpSince RESTEasy 4.0.0
*/
@Test
public void testResponseFilters2() throws Exception {
Client client = ClientBuilder.newClient();
// Create book.
WebTarget base = client.target(PortProviderUtil.generateURL("/async"));
// async way later
Response response = base.request()
.header("ResponseFilter1", "sync-pass")
.header("ResponseFilter2", "sync-pass")
.header("ResponseFilter3", "async-fail-late")
.get();
assertEquals(200, response.getStatus());
assertEquals("ResponseFilter3", response.readEntity(String.class));
client.close();
}
/**
* @tpTestDetails Async filters work with resume(Throwable) wrt filters/callbacks/complete
* @tpSince RESTEasy 4.0.0
*/
@Test
public void testResponseFiltersThrow() throws Exception {
Client client = ClientBuilder.newClient();
testResponseFilterThrow(client, "/callback-async", false);
testResponseFilterThrow(client, "/callback", false);
testResponseFilterThrow(client, "/callback-async", true);
testResponseFilterThrow(client, "/callback", true);
client.close();
}
private void testResponseFilterThrow(Client client, String target, boolean useExceptionMapper) {
WebTarget base = client.target(PortProviderUtil.generateURL(target));
// throw in response filter
Response response = base.request()
.header("ResponseFilter1", "sync-pass")
.header("ResponseFilter2", "sync-pass")
.header("UseExceptionMapper", useExceptionMapper)
.header("ResponseFilter3", "async-throw-late")
.get();
// this is 500 even with exception mapper because exceptions in response filters are not mapped
assertEquals(500, response.getStatus());
try {
// give a chance to CI to run the callbacks
Thread.sleep(1000);
} catch (InterruptedException e) {
}
// check that callbacks were called
response = base.request().get();
assertEquals(200, response.getStatus());
if (useExceptionMapper)
assertEquals(AsyncFilterException.class.getName() + ": ouch",
response.getHeaders().getFirst("ResponseFilterCallbackResponseFilter3"));
else
assertEquals("java.lang.Throwable: ouch", response.getHeaders().getFirst("ResponseFilterCallbackResponseFilter3"));
// throw in request filter
response = base.request()
.header("Filter1", "sync-pass")
.header("Filter2", "sync-pass")
.header("UseExceptionMapper", useExceptionMapper)
.header("Filter3", "async-throw-late")
.get();
if (useExceptionMapper) {
assertEquals(Status.ACCEPTED.getStatusCode(), response.getStatus());
assertEquals("exception was mapped", response.readEntity(String.class));
} else {
assertEquals(500, response.getStatus());
}
try {
// give a chance to CI to run the callbacks
Thread.sleep(1000);
} catch (InterruptedException e) {
}
// check that callbacks were called
response = base.request().get();
assertEquals(200, response.getStatus());
if (useExceptionMapper)
assertEquals(AsyncFilterException.class.getName() + ": ouch",
response.getHeaders().getFirst("RequestFilterCallbackFilter3"));
else
assertEquals("java.lang.Throwable: ouch", response.getHeaders().getFirst("RequestFilterCallbackFilter3"));
}
/**
* @tpTestDetails Interceptors work with non-Response resource methods
* @tpSince RESTEasy 4.0.0
*/
@Test
public void testRequestFiltersGuessReturnType() throws Exception {
Client client = ClientBuilder.newClient();
// Create book.
WebTarget base = client.target(PortProviderUtil.generateURL("/non-response"));
Response response = base.request()
.header("Filter1", "async-pass")
.header("Filter2", "sync-pass")
.header("Filter3", "sync-pass")
.get();
assertEquals(200, response.getStatus());
assertEquals("resource", response.readEntity(String.class));
client.close();
}
}
| AsyncRequestFilterTest |
java | elastic__elasticsearch | x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/JsonFactory.java | {
"start": 536,
"end": 1778
} | class ____ {
private static final com.fasterxml.jackson.core.JsonFactory jsonFactory;
static {
jsonFactory = new com.fasterxml.jackson.core.JsonFactory();
jsonFactory.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true);
jsonFactory.configure(com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_COMMENTS, true);
jsonFactory.configure(com.fasterxml.jackson.core.JsonFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many
// mappings now...
// Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.core.json.UTF8JsonGenerator#close() method
jsonFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);
jsonFactory.configure(com.fasterxml.jackson.core.JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
public static JsonGenerator generator(OutputStream out) throws IOException {
return jsonFactory.createGenerator(out, JsonEncoding.UTF8);
}
public static JsonParser parser(InputStream in) throws IOException {
return jsonFactory.createParser(in);
}
}
| JsonFactory |
java | elastic__elasticsearch | x-pack/plugin/repositories-metering-api/src/main/java/org/elasticsearch/xpack/repositories/metering/rest/RestGetRepositoriesMeteringAction.java | {
"start": 948,
"end": 1773
} | class ____ extends BaseRestHandler {
@Override
public String getName() {
return "get_repositories_metering_action";
}
@Override
public List<Route> routes() {
return List.of(new Route(GET, "/_nodes/{nodeId}/_repositories_metering"));
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"));
RepositoriesMeteringRequest repositoriesMeteringRequest = new RepositoriesMeteringRequest(nodesIds);
return channel -> client.execute(
RepositoriesMeteringAction.INSTANCE,
repositoriesMeteringRequest,
new RestActions.NodesResponseRestListener<>(channel)
);
}
}
| RestGetRepositoriesMeteringAction |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/indices/SystemDataStreamDescriptor.java | {
"start": 2576,
"end": 8453
} | class ____ implements SystemResourceDescriptor {
private final String dataStreamName;
private final String description;
private final Type type;
private final ComposableIndexTemplate composableIndexTemplate;
private final Map<String, ComponentTemplate> componentTemplates;
private final List<String> allowedElasticProductOrigins;
private final String origin;
private final ExecutorNames executorNames;
/**
* Creates a new descriptor for a system data descriptor
* @param dataStreamName the name of the data stream. Must not be {@code null}
* @param description a brief description of what the data stream is used for. Must not be {@code null}
* @param type the {@link Type} of the data stream which determines how the data stream can be accessed. Must not be {@code null}
* @param composableIndexTemplate the {@link ComposableIndexTemplate} that contains the mappings and settings for the data stream.
* Must not be {@code null}
* @param componentTemplates a map that contains {@link ComponentTemplate} instances corresponding to those references in the
* {@link ComposableIndexTemplate}
* @param allowedElasticProductOrigins a list of product origin values that are allowed to access this data stream if the
* type is {@link Type#EXTERNAL}. Must not be {@code null}
* @param origin specifies the origin to use when creating or updating the data stream
* @param executorNames thread pools that should be used for operations on the system data stream
*/
public SystemDataStreamDescriptor(
String dataStreamName,
String description,
Type type,
ComposableIndexTemplate composableIndexTemplate,
Map<String, ComponentTemplate> componentTemplates,
List<String> allowedElasticProductOrigins,
String origin,
ExecutorNames executorNames
) {
this.dataStreamName = Objects.requireNonNull(dataStreamName, "dataStreamName must be specified");
if (dataStreamName.length() < 2) {
throw new IllegalArgumentException("system data stream name [" + dataStreamName + "] but must at least 2 characters in length");
}
if (dataStreamName.charAt(0) != '.') {
throw new IllegalArgumentException("system data stream name [" + dataStreamName + "] but must start with the character [.]");
}
this.description = Objects.requireNonNull(description, "description must be specified");
this.type = Objects.requireNonNull(type, "type must be specified");
this.composableIndexTemplate = Objects.requireNonNull(composableIndexTemplate, "composableIndexTemplate must be provided");
this.componentTemplates = componentTemplates == null ? Map.of() : Map.copyOf(componentTemplates);
this.allowedElasticProductOrigins = Objects.requireNonNull(
allowedElasticProductOrigins,
"allowedElasticProductOrigins must not be null"
);
if (type == Type.EXTERNAL && allowedElasticProductOrigins.isEmpty()) {
throw new IllegalArgumentException("External system data stream without allowed products is not a valid combination");
}
this.executorNames = Objects.nonNull(executorNames) ? executorNames : ExecutorNames.DEFAULT_SYSTEM_DATA_STREAM_THREAD_POOLS;
this.origin = origin;
}
public String getDataStreamName() {
return dataStreamName;
}
/**
* Retrieve backing indices for this system data stream
* @param metadata Metadata in which to look for indices
* @return List of names of backing indices
*/
@Deprecated
public List<String> getBackingIndexNames(Metadata metadata) {
return getBackingIndexNames(metadata.getProject());
}
/**
* Retrieve backing indices for this system data stream
* @param projectMetadata Project metadata in which to look for indices
* @return List of names of backing indices
*/
public List<String> getBackingIndexNames(ProjectMetadata projectMetadata) {
DataStream dataStream = projectMetadata.dataStreams().get(dataStreamName);
if (dataStream == null) {
return Collections.emptyList();
}
return Stream.concat(dataStream.getIndices().stream(), dataStream.getFailureIndices().stream()).map(Index::getName).toList();
}
@Override
public List<String> getMatchingIndices(ProjectMetadata metadata) {
return getBackingIndexNames(metadata);
}
public String getDescription() {
return description;
}
public ComposableIndexTemplate getComposableIndexTemplate() {
return composableIndexTemplate;
}
@Override
public String getOrigin() {
return origin;
}
@Override
public boolean isAutomaticallyManaged() {
return true;
}
@Override
public boolean isExternal() {
return type == Type.EXTERNAL;
}
public String getBackingIndexPattern() {
return backingIndexPatternForDataStream(getDataStreamName());
}
private static String backingIndexPatternForDataStream(String dataStream) {
return ".(migrated-){0,}[fd]s-" + dataStream + "-*";
}
@Override
public List<String> getAllowedElasticProductOrigins() {
return allowedElasticProductOrigins;
}
public Map<String, ComponentTemplate> getComponentTemplates() {
return componentTemplates;
}
/**
* Get the names of the thread pools that should be used for operations on this data stream.
* @return Names for get, search, and write executors.
*/
@Override
public ExecutorNames getThreadPoolNames() {
return this.executorNames;
}
public | SystemDataStreamDescriptor |
java | google__gson | test-shrinker/src/test/java/com/google/gson/it/ShrinkingIT.java | {
"start": 2576,
"end": 2767
} | class ____; load all custom classes from JAR and not
// from dependencies of this test
ClassLoader classLoader = null;
// Load the shrunken and obfuscated JARs with a separate | loader |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy-mutiny/deployment/src/test/java/io/quarkus/resteasy/mutiny/test/annotations/Async.java | {
"start": 386,
"end": 430
} | interface ____ {
// Marker annotation
} | Async |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtils.java | {
"start": 763,
"end": 2484
} | class ____ {
/**
* Method to get indexmetadata of a index, that potentially is behind an alias or data stream.
*
* @param name Name of the index or the alias
* @param metadata Metadata to search for the name
* @return IndexMetadata of the concrete index. If this alias or data stream has a writable index, this one is returned
* @throws IllegalStateException If an alias points to two indices
* @throws IndexNotFoundException If no index exists
*/
public static IndexMetadata getConcreteIndex(String name, Metadata metadata) {
@NotMultiProjectCapable(description = "Watcher is not available in serverless")
ProjectMetadata projectMetadata = metadata.getProject(ProjectId.DEFAULT);
IndexAbstraction indexAbstraction = projectMetadata.getIndicesLookup().get(name);
if (indexAbstraction == null) {
return null;
}
if (indexAbstraction.getType() == IndexAbstraction.Type.ALIAS
&& indexAbstraction.getIndices().size() > 1
&& indexAbstraction.getWriteIndex() == null) {
throw new IllegalStateException(
String.format(
Locale.ROOT,
"Alias [%s] points to %d indices, and does not have a designated write index",
name,
indexAbstraction.getIndices().size()
)
);
}
Index concreteIndex = indexAbstraction.getWriteIndex();
if (concreteIndex == null) {
concreteIndex = indexAbstraction.getIndices().get(indexAbstraction.getIndices().size() - 1);
}
return projectMetadata.index(concreteIndex);
}
}
| WatchStoreUtils |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/inputstreams/BinaryDiff_diff_InputStream_Test.java | {
"start": 1034,
"end": 3102
} | class ____ {
private static BinaryDiff binaryDiff;
@BeforeAll
static void setUpOnce() {
binaryDiff = new BinaryDiff();
}
private InputStream actual;
private InputStream expected;
@Test
void should_return_no_diff_if_inputstreams_have_equal_content() throws IOException {
actual = stream(0xCA, 0xFE, 0xBA, 0xBE);
expected = stream(0xCA, 0xFE, 0xBA, 0xBE);
BinaryDiffResult result = binaryDiff.diff(actual, expected);
assertThat(result.hasNoDiff()).isTrue();
}
@Test
void should_return_diff_if_inputstreams_differ_on_one_byte() throws IOException {
actual = stream(0xCA, 0xFE, 0xBA, 0xBE);
expected = stream(0xCA, 0xFE, 0xBE, 0xBE);
BinaryDiffResult result = binaryDiff.diff(actual, expected);
assertThat(result.hasDiff()).isTrue();
assertThat(result.offset).isEqualTo(2);
assertThat(result.actual).isEqualTo("0xBA");
assertThat(result.expected).isEqualTo("0xBE");
}
@Test
void should_return_diff_if_actual_is_shorter() throws IOException {
actual = stream(0xCA, 0xFE, 0xBA);
expected = stream(0xCA, 0xFE, 0xBA, 0xBE);
BinaryDiffResult result = binaryDiff.diff(actual, expected);
assertThat(result.hasDiff()).isTrue();
assertThat(result.offset).isEqualTo(3);
assertThat(result.actual).isEqualTo("EOF");
assertThat(result.expected).isEqualTo("0xBE");
}
@Test
void should_return_diff_if_expected_is_shorter() throws IOException {
actual = stream(0xCA, 0xFE, 0xBA, 0xBE);
expected = stream(0xCA, 0xFE, 0xBA);
BinaryDiffResult result = binaryDiff.diff(actual, expected);
assertThat(result.hasDiff()).isTrue();
assertThat(result.offset).isEqualTo(3);
assertThat(result.actual).isEqualTo("0xBE");
assertThat(result.expected).isEqualTo("EOF");
}
private InputStream stream(int... contents) {
byte[] byteContents = new byte[contents.length];
for (int i = 0; i < contents.length; i++) {
byteContents[i] = (byte) contents[i];
}
return new ByteArrayInputStream(byteContents);
}
}
| BinaryDiff_diff_InputStream_Test |
java | dropwizard__dropwizard | dropwizard-e2e/src/main/java/com/example/validation/ValidatedResource.java | {
"start": 391,
"end": 1087
} | class ____ {
@GET
@Path("/injectable")
public void injectableValidation(@QueryParam("value") @OneOf("right") @WasInjected String value) {
//Do nothing
}
@GET
@Path("/default")
public void defaultValidation(@QueryParam("value") @OneOf("right") String value) {
//Do nothing
}
@POST
@Path("/bean-validation")
public void beanValidation(@Valid @NotNull ValidatedBean bean) {
if (bean == null
|| bean.getString() == null || bean.getString().trim().isEmpty()
|| bean.getNumber() < 0
|| bean.getList().isEmpty()) {
throw new IllegalArgumentException();
}
}
}
| ValidatedResource |
java | elastic__elasticsearch | modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapperTests.java | {
"start": 1406,
"end": 8589
} | class ____ extends MapperTestCase {
@Override
protected Object getSampleValueForDocument() {
return Map.of("ten", 10, "twenty", 20);
}
@Override
protected Object getSampleObjectForDocument() {
return getSampleValueForDocument();
}
@Override
protected void assertExistsQuery(MapperService mapperService) {
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> super.assertExistsQuery(mapperService));
assertEquals("[rank_features] fields do not support [exists] queries", iae.getMessage());
}
@Override
protected Collection<? extends Plugin> getPlugins() {
return List.of(new MapperExtrasPlugin());
}
@Override
protected void minimalMapping(XContentBuilder b) throws IOException {
b.field("type", "rank_features");
}
@Override
protected boolean supportsStoredFields() {
return false;
}
@Override
protected boolean supportsIgnoreMalformed() {
return false;
}
@Override
protected void registerParameters(ParameterChecker checker) throws IOException {
checker.registerConflictCheck("positive_score_impact", b -> b.field("positive_score_impact", false));
}
@Override
protected boolean supportsMeta() {
return false;
}
public void testDefaults() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
assertEquals(Strings.toString(fieldMapping(this::minimalMapping)), mapper.mappingSource().toString());
ParsedDocument doc1 = mapper.parse(source(this::writeField));
List<IndexableField> fields = doc1.rootDoc().getFields("field");
assertEquals(2, fields.size());
assertThat(fields.get(0), Matchers.instanceOf(FeatureField.class));
FeatureField featureField1 = null;
FeatureField featureField2 = null;
for (IndexableField field : fields) {
if (field.stringValue().equals("ten")) {
featureField1 = (FeatureField) field;
} else if (field.stringValue().equals("twenty")) {
featureField2 = (FeatureField) field;
} else {
throw new UnsupportedOperationException();
}
}
int freq1 = RankFeatureFieldMapperTests.getFrequency(featureField1.tokenStream(null, null));
int freq2 = RankFeatureFieldMapperTests.getFrequency(featureField2.tokenStream(null, null));
assertTrue(freq1 < freq2);
}
public void testNegativeScoreImpact() throws Exception {
DocumentMapper mapper = createDocumentMapper(
fieldMapping(b -> b.field("type", "rank_features").field("positive_score_impact", false))
);
ParsedDocument doc1 = mapper.parse(source(this::writeField));
List<IndexableField> fields = doc1.rootDoc().getFields("field");
assertEquals(2, fields.size());
assertThat(fields.get(0), Matchers.instanceOf(FeatureField.class));
FeatureField featureField1 = null;
FeatureField featureField2 = null;
for (IndexableField field : fields) {
if (field.stringValue().equals("ten")) {
featureField1 = (FeatureField) field;
} else if (field.stringValue().equals("twenty")) {
featureField2 = (FeatureField) field;
} else {
throw new UnsupportedOperationException();
}
}
int freq1 = RankFeatureFieldMapperTests.getFrequency(featureField1.tokenStream(null, null));
int freq2 = RankFeatureFieldMapperTests.getFrequency(featureField2.tokenStream(null, null));
assertTrue(freq1 > freq2);
}
public void testDotinFieldname() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
DocumentParsingException ex = expectThrows(
DocumentParsingException.class,
() -> mapper.parse(source(b -> b.field("field", Map.of("politi.cs", 10, "sports", 20))))
);
assertThat(ex.getCause().getMessage(), containsString("do not support dots in feature names"));
assertThat(ex.getCause().getMessage(), containsString("politi.cs"));
}
public void testRejectMultiValuedFields() throws MapperParsingException, IOException {
DocumentMapper mapper = createDocumentMapper(mapping(b -> {
b.startObject("field").field("type", "rank_features").endObject();
b.startObject("foo").startObject("properties");
{
b.startObject("field").field("type", "rank_features").endObject();
}
b.endObject().endObject();
}));
DocumentParsingException e = expectThrows(
DocumentParsingException.class,
() -> mapper.parse(source(b -> b.startObject("field").field("foo", Arrays.asList(10, 20)).endObject()))
);
assertEquals(
"[rank_features] fields take hashes that map a feature to a strictly positive float, but got unexpected token " + "START_ARRAY",
e.getCause().getMessage()
);
e = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> {
b.startArray("foo");
{
b.startObject().startObject("field").field("bar", 10).endObject().endObject();
b.startObject().startObject("field").field("bar", 20).endObject().endObject();
}
b.endArray();
})));
assertEquals(
"[rank_features] fields do not support indexing multiple values for the same rank feature [foo.field.bar] in "
+ "the same document",
e.getCause().getMessage()
);
}
public void testCannotBeUsedInMultifields() {
Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
b.field("type", "keyword");
b.startObject("fields");
b.startObject("feature");
b.field("type", "rank_features");
b.endObject();
b.endObject();
})));
assertThat(e.getMessage(), containsString("Field [feature] of type [rank_features] can't be used in multifields"));
}
@Override
protected Object generateRandomInputValue(MappedFieldType ft) {
assumeFalse("Test implemented in a follow up", true);
return null;
}
@Override
protected boolean allowsNullValues() {
return false; // TODO should this allow null values?
}
@Override
protected SyntheticSourceSupport syntheticSourceSupport(boolean syntheticSource) {
throw new AssumptionViolatedException("not supported");
}
@Override
protected IngestScriptSupport ingestScriptSupport() {
throw new AssumptionViolatedException("not supported");
}
@Override
protected List<SortShortcutSupport> getSortShortcutSupport() {
return List.of();
}
@Override
protected boolean supportsDocValuesSkippers() {
return false;
}
}
| RankFeaturesFieldMapperTests |
java | apache__flink | flink-table/flink-table-common/src/test/java/org/apache/flink/table/types/extraction/TypeInferenceExtractorTest.java | {
"start": 113280,
"end": 113491
} | class ____ extends AsyncScalarFunction {
public void eval(CompletableFuture<Long> f, int[] i) {}
}
@FunctionHint(output = @DataTypeHint("STRING"))
private static | InvalidMethodScalarFunctionAsync |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/short_/ShortAssert_isNotPositive_Test.java | {
"start": 892,
"end": 1209
} | class ____ extends ShortAssertBaseTest {
@Override
protected ShortAssert invoke_api_method() {
return assertions.isNotPositive();
}
@Override
protected void verify_internal_effects() {
verify(shorts).assertIsNotPositive(getInfo(assertions), getActual(assertions));
}
}
| ShortAssert_isNotPositive_Test |
java | apache__camel | components/camel-sql/src/main/java/org/apache/camel/processor/aggregate/jdbc/JdbcCamelCodec.java | {
"start": 1282,
"end": 5526
} | class ____ {
public byte[] marshallExchange(Exchange exchange, boolean allowSerializedHeaders)
throws IOException {
ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
marshallExchange(exchange, allowSerializedHeaders, bytesOut);
return bytesOut.toByteArray();
}
public void marshallExchange(
Exchange exchange, boolean allowSerializedHeaders, OutputStream outputStream)
throws IOException {
// use DefaultExchangeHolder to marshal to a serialized object
DefaultExchangeHolder pe = DefaultExchangeHolder.marshal(exchange, false, allowSerializedHeaders);
// add the aggregated size and timeout property as the only properties we want to retain
DefaultExchangeHolder.addProperty(pe, Exchange.AGGREGATED_SIZE,
exchange.getProperty(ExchangePropertyKey.AGGREGATED_SIZE, Integer.class));
DefaultExchangeHolder.addProperty(pe, Exchange.AGGREGATED_TIMEOUT,
exchange.getProperty(ExchangePropertyKey.AGGREGATED_TIMEOUT, Long.class));
// add the aggregated completed by property to retain
DefaultExchangeHolder.addProperty(pe, Exchange.AGGREGATED_COMPLETED_BY,
exchange.getProperty(ExchangePropertyKey.AGGREGATED_COMPLETED_BY, String.class));
// add the aggregated correlation key property to retain
DefaultExchangeHolder.addProperty(pe, Exchange.AGGREGATED_CORRELATION_KEY,
exchange.getProperty(ExchangePropertyKey.AGGREGATED_CORRELATION_KEY, String.class));
DefaultExchangeHolder.addProperty(pe, Exchange.AGGREGATED_CORRELATION_KEY,
exchange.getProperty(ExchangePropertyKey.AGGREGATED_CORRELATION_KEY, String.class));
// and a guard property if using the flexible toolbox aggregator
DefaultExchangeHolder.addProperty(pe, Exchange.AGGREGATED_COLLECTION_GUARD,
exchange.getProperty(Exchange.AGGREGATED_COLLECTION_GUARD, String.class));
// persist the from endpoint as well
if (exchange.getFromEndpoint() != null) {
DefaultExchangeHolder.addProperty(pe, "CamelAggregatedFromEndpoint", exchange.getFromEndpoint().getEndpointUri());
}
encode(pe, outputStream);
}
public Exchange unmarshallExchange(CamelContext camelContext, byte[] buffer, String deserializationFilter)
throws IOException, ClassNotFoundException {
return unmarshallExchange(camelContext, new ByteArrayInputStream(buffer), deserializationFilter);
}
public Exchange unmarshallExchange(CamelContext camelContext, InputStream inputStream, String deserializationFilter)
throws IOException, ClassNotFoundException {
DefaultExchangeHolder pe = decode(camelContext, inputStream, deserializationFilter);
Exchange answer = new DefaultExchange(camelContext);
DefaultExchangeHolder.unmarshal(answer, pe);
// restore the from endpoint
String fromEndpointUri = (String) answer.removeProperty("CamelAggregatedFromEndpoint");
if (fromEndpointUri != null) {
Endpoint fromEndpoint = camelContext.hasEndpoint(fromEndpointUri);
if (fromEndpoint != null) {
answer.getExchangeExtension().setFromEndpoint(fromEndpoint);
}
}
return answer;
}
private void encode(Object object, OutputStream bytesOut) throws IOException {
try (ObjectOutputStream objectOut = new ObjectOutputStream(bytesOut)) {
objectOut.writeObject(object);
}
}
private DefaultExchangeHolder decode(CamelContext camelContext, InputStream bytesIn, String deserializationFilter)
throws IOException, ClassNotFoundException {
ObjectInputStream objectIn = null;
Object obj = null;
try {
objectIn = new ClassLoadingAwareObjectInputStream(camelContext.getApplicationContextClassLoader(), bytesIn);
objectIn.setObjectInputFilter(ObjectInputFilter.Config.createFilter(deserializationFilter));
obj = objectIn.readObject();
} finally {
IOHelper.close(objectIn);
}
return (DefaultExchangeHolder) obj;
}
}
| JdbcCamelCodec |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/workload/ShareConsumeBenchWorker.java | {
"start": 11861,
"end": 14114
} | class ____ implements Runnable {
private final Histogram latencyHistogram;
private final Histogram messageSizeHistogram;
private final ThreadSafeShareConsumer consumer;
private final Optional<RecordProcessor> recordProcessor;
ConsumeStatusUpdater(Histogram latencyHistogram,
Histogram messageSizeHistogram,
ThreadSafeShareConsumer consumer,
Optional<RecordProcessor> recordProcessor) {
this.latencyHistogram = latencyHistogram;
this.messageSizeHistogram = messageSizeHistogram;
this.consumer = consumer;
this.recordProcessor = recordProcessor;
}
@Override
public void run() {
try {
update();
} catch (Exception e) {
WorkerUtils.abort(log, "ConsumeStatusUpdater", e, doneFuture);
}
}
StatusData update() {
Histogram.Summary latSummary = latencyHistogram.summarize(StatusData.PERCENTILES);
Histogram.Summary msgSummary = messageSizeHistogram.summarize(StatusData.PERCENTILES);
// Parse out the RecordProcessor's status, id specified.
Optional<JsonNode> recordProcessorStatus = Optional.empty();
if (recordProcessor.isPresent()) {
recordProcessorStatus = Optional.of(recordProcessor.get().processorStatus());
}
StatusData statusData = new StatusData(
consumer.subscription(),
latSummary.numSamples(),
(long) (msgSummary.numSamples() * msgSummary.average()),
(long) msgSummary.average(),
latSummary.average(),
latSummary.percentiles().get(0).value(),
latSummary.percentiles().get(1).value(),
latSummary.percentiles().get(2).value(),
recordProcessorStatus);
statusUpdater.updateConsumeStatus(consumer.clientId(), statusData);
log.info("Status={}", JsonUtil.toJsonString(statusData));
return statusData;
}
}
public static | ConsumeStatusUpdater |
java | grpc__grpc-java | istio-interop-testing/src/test/java/io/grpc/testing/istio/EchoTestServerTest.java | {
"start": 16149,
"end": 16910
} | class ____ extends EchoTestServiceImplBase {
List<ForwardEchoRequest> receivedRequests;
List<Object> responsesToReturn;
@Override
public void forwardEcho(ForwardEchoRequest request,
StreamObserver<ForwardEchoResponse> responseObserver) {
receivedRequests.add(request);
Object response = responsesToReturn.remove(0);
if (response instanceof Throwable) {
responseObserver.onError((Throwable) response);
} else if (response instanceof ForwardEchoResponse) {
responseObserver.onNext((ForwardEchoResponse) response);
responseObserver.onCompleted();
}
responseObserver.onError(new IllegalArgumentException("Unknown type in responsesToReturn"));
}
}
}
| ForwardServiceForNonGrpcImpl |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java | {
"start": 10470,
"end": 11344
} | class ____ extends OrdinalsFilter {
@Override
public LongBitSet acceptedGlobalOrdinals(SortedSetDocValues globalOrdinals) throws IOException {
final long numOrds = globalOrdinals.getValueCount();
final LongBitSet acceptedGlobalOrdinals = new LongBitSet(numOrds);
final TermsEnum termEnum = globalOrdinals.termsEnum();
BytesRef term = termEnum.next();
while (term != null) {
if (Math.floorMod(
StringHelper.murmurhash3_x86_32(term, HASH_PARTITIONING_SEED),
incNumPartitions
) == incZeroBasedPartition) {
acceptedGlobalOrdinals.set(termEnum.ord());
}
term = termEnum.next();
}
return acceptedGlobalOrdinals;
}
}
| PartitionedOrdinalsFilter |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java | {
"start": 73818,
"end": 75521
} | class ____ {
public static KeywordFieldMapper.KeywordFieldType syntheticSourceDelegate(boolean isParentFieldStored, MultiFields multiFields) {
// if the parent field is stored, there is no need to delegate anything as we can get source directly from the stored field
if (isParentFieldStored) {
return null;
}
// otherwise, attempt to retrieve a keyword delegate to rely on for synthetic source
var kwd = getKeywordFieldMapperForSyntheticSource(multiFields);
if (kwd != null) {
return kwd.fieldType();
}
return null;
}
public static KeywordFieldMapper getKeywordFieldMapperForSyntheticSource(Iterable<? extends Mapper> multiFields) {
for (Mapper sub : multiFields) {
if (sub.typeName().equals(KeywordFieldMapper.CONTENT_TYPE)) {
KeywordFieldMapper kwd = (KeywordFieldMapper) sub;
if (keywordFieldSupportsSyntheticSource(kwd)) {
return kwd;
}
}
}
return null;
}
/**
* Returns whether the given keyword field supports synthetic source.
*/
private static boolean keywordFieldSupportsSyntheticSource(final KeywordFieldMapper keyword) {
// the field must be stored in some way, whether that be via store or doc values
return (keyword.hasNormalizer() == false || keyword.isNormalizerSkipStoreOriginalValue())
&& (keyword.fieldType().hasDocValues() || keyword.fieldType().isStored());
}
}
}
| SyntheticSourceHelper |
java | apache__spark | common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/OneForOneBlockFetcher.java | {
"start": 11227,
"end": 11693
} | class ____ {
/**
* For {@link FetchShuffleBlocks} message, the ids are reduceIds.
* For {@link FetchShuffleBlockChunks} message, the ids are chunkIds.
*/
final ArrayList<Integer> ids;
final ArrayList<String> blockIds;
BlocksInfo() {
this.ids = new ArrayList<>();
this.blockIds = new ArrayList<>();
}
}
/** Callback invoked on receipt of each chunk. We equate a single chunk to a single block. */
private | BlocksInfo |
java | google__dagger | javatests/dagger/internal/codegen/ScopingValidationTest.java | {
"start": 22001,
"end": 22868
} | interface ____ {",
" SimpleType type();",
"}");
CompilerTests.daggerCompiler(type, scopedComponent, unscopedComponent)
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
String.join(
"\n",
"test.UnscopedComponent (unscoped) cannot depend on scoped components:",
" @Singleton test.ScopedComponent"));
});
}
@Test
public void componentWithSingletonScopeMayNotDependOnOtherScope() {
// Singleton must be the widest lifetime of present scopes.
Source type =
CompilerTests.javaSource(
"test.SimpleType",
"package test;",
"",
"import javax.inject.Inject;",
"",
" | UnscopedComponent |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/convert/support/ArrayToArrayConverter.java | {
"start": 1342,
"end": 2690
} | class ____ implements ConditionalGenericConverter {
private final CollectionToArrayConverter helperConverter;
private final ConversionService conversionService;
public ArrayToArrayConverter(ConversionService conversionService) {
this.helperConverter = new CollectionToArrayConverter(conversionService);
this.conversionService = conversionService;
}
@Override
public Set<ConvertiblePair> getConvertibleTypes() {
return Collections.singleton(new ConvertiblePair(Object[].class, Object[].class));
}
@Override
public boolean matches(TypeDescriptor sourceType, TypeDescriptor targetType) {
return this.helperConverter.matches(sourceType, targetType);
}
@Override
public @Nullable Object convert(@Nullable Object source, TypeDescriptor sourceType, TypeDescriptor targetType) {
if (this.conversionService instanceof GenericConversionService genericConversionService) {
TypeDescriptor targetElement = targetType.getElementTypeDescriptor();
if (targetElement != null && targetType.getType().isInstance(source) &&
genericConversionService.canBypassConvert(sourceType.getElementTypeDescriptor(), targetElement)) {
return source;
}
}
List<Object> sourceList = Arrays.asList(ObjectUtils.toObjectArray(source));
return this.helperConverter.convert(sourceList, sourceType, targetType);
}
}
| ArrayToArrayConverter |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedDeserializer.java | {
"start": 3525,
"end": 3968
} | class ____ set using constructor "
+ "(" + inner.getClass().getName() + ")" +
" is different from the one set in " + deserializerConfigKey + " config " +
"(" + windowedInnerDeserializerClass.deserializer().getClass().getName() + ").");
}
} else if (inner == null && deserializerConfigValue == null) {
throw new IllegalArgumentException("Inner | deserializer |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/mock/http/server/reactive/MockServerHttpRequest.java | {
"start": 11269,
"end": 12410
} | interface ____ extends BaseBuilder<BodyBuilder> {
/**
* Set the length of the body in bytes, as specified by the
* {@code Content-Length} header.
* @param contentLength the content length
* @return this builder
* @see HttpHeaders#setContentLength(long)
*/
BodyBuilder contentLength(long contentLength);
/**
* Set the {@linkplain MediaType media type} of the body, as specified
* by the {@code Content-Type} header.
* @param contentType the content type
* @return this builder
* @see HttpHeaders#setContentType(MediaType)
*/
BodyBuilder contentType(MediaType contentType);
/**
* Set the body of the request and build it.
* @param body the body
* @return the built request entity
*/
MockServerHttpRequest body(Publisher<? extends DataBuffer> body);
/**
* Set the body of the request and build it.
* <p>The String is assumed to be UTF-8 encoded unless the request has a
* "content-type" header with a charset attribute.
* @param body the body as text
* @return the built request entity
*/
MockServerHttpRequest body(String body);
}
private static | BodyBuilder |
java | google__guice | extensions/servlet/test/com/google/inject/servlet/ScopeRequestIntegrationTest.java | {
"start": 5618,
"end": 5856
} | class ____ {
private static final String INVALID = "invalid";
@Inject
public SomeObject(@Named(INVALID) String value) {
this.value = value;
}
private final String value;
}
@Singleton
public static | SomeObject |
java | quarkusio__quarkus | integration-tests/jpa-postgresql/src/main/java/io/quarkus/it/jpa/postgresql/defaultpu/SomeEmbeddable.java | {
"start": 338,
"end": 731
} | class ____ {
public int someNumber;
@JdbcTypeCode(SqlTypes.JSON)
public ToBeSerializedWithDateTime someOtherJson;
public SomeEmbeddable() {
}
public SomeEmbeddable(int someNumber, LocalDate date) {
this.someNumber = someNumber;
this.someOtherJson = new ToBeSerializedWithDateTime(date);
}
@RegisterForReflection
public static | SomeEmbeddable |
java | apache__camel | components/camel-servicenow/camel-servicenow-component/src/main/java/org/apache/camel/component/servicenow/ServiceNowExceptionModel.java | {
"start": 1048,
"end": 1561
} | class ____ {
private final String status;
private final Map<String, String> error;
public ServiceNowExceptionModel(
@JsonProperty("status") String status,
@JsonProperty("error") Map<String, String> error) {
this.status = status;
this.error = error;
}
public String getStatus() {
return status;
}
public Map<String, String> getError() {
return error;
}
}
| ServiceNowExceptionModel |
java | resilience4j__resilience4j | resilience4j-core/src/main/java/io/github/resilience4j/core/ContextAwareScheduledThreadPoolExecutor.java | {
"start": 984,
"end": 4122
} | class ____ extends ScheduledThreadPoolExecutor {
private final List<ContextPropagator> contextPropagators;
private static final String THREAD_PREFIX = "ContextAwareScheduledThreadPool";
private ContextAwareScheduledThreadPoolExecutor(int corePoolSize,
@Nullable List<ContextPropagator> contextPropagators) {
super(corePoolSize, new NamingThreadFactory(THREAD_PREFIX));
this.contextPropagators = contextPropagators != null ? contextPropagators : new ArrayList<>();
}
public List<ContextPropagator> getContextPropagators() {
return Collections.unmodifiableList(this.contextPropagators);
}
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
Map<String, String> mdcContextMap = getMdcContextMap();
return super.schedule(ContextPropagator.decorateRunnable(contextPropagators, () -> {
try {
setMDCContext(mdcContextMap);
command.run();
} finally {
MDC.clear();
}
}), delay, unit);
}
@Override
public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
Map<String, String> mdcContextMap = getMdcContextMap();
return super.schedule(ContextPropagator.decorateCallable(contextPropagators, () -> {
try {
setMDCContext(mdcContextMap);
return callable.call();
} finally {
MDC.clear();
}
}), delay, unit);
}
@Override
public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) {
Map<String, String> mdcContextMap = getMdcContextMap();
return super.scheduleAtFixedRate(ContextPropagator.decorateRunnable(contextPropagators, () -> {
try {
setMDCContext(mdcContextMap);
command.run();
} finally {
MDC.clear();
}
}), initialDelay, period, unit);
}
@Override
public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) {
Map<String, String> mdcContextMap = getMdcContextMap();
return super.scheduleWithFixedDelay(ContextPropagator.decorateRunnable(contextPropagators, () -> {
try {
setMDCContext(mdcContextMap);
command.run();
} finally {
MDC.clear();
}
}), initialDelay, delay, unit);
}
private Map<String, String> getMdcContextMap() {
return Optional.ofNullable(MDC.getCopyOfContextMap()).orElse(Collections.emptyMap());
}
private void setMDCContext(Map<String, String> contextMap) {
MDC.clear();
if (contextMap != null) {
MDC.setContextMap(contextMap);
}
}
public static Builder newScheduledThreadPool() {
return new Builder();
}
public static | ContextAwareScheduledThreadPoolExecutor |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/loop/FluxGroupByLoop.java | {
"start": 808,
"end": 1180
} | class ____ {
final FluxGroupByTest groupByTest = new FluxGroupByTest();
@Test
public void twoGroupsLongAsyncMergeHiddenLoop() {
for (int i = 0; i < 100; i++) {
groupByTest.twoGroupsLongAsyncMergeHidden();
}
}
@Test
public void twoGroupsLongAsyncMergeLoop() {
for (int i = 0; i < 100; i++) {
groupByTest.twoGroupsLongAsyncMerge();
}
}
}
| FluxGroupByLoop |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.