language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/flowable/AbstractBackpressureThrottlingSubscriber.java | {
"start": 1102,
"end": 1339
} | class ____ operators that throttle excessive updates from upstream in case if
* downstream {@link Subscriber} is not ready to receive updates.
*
* @param <T> the upstream value type
* @param <R> the downstream value type
*/
abstract | for |
java | quarkusio__quarkus | integration-tests/openapi/src/test/java/io/quarkus/it/openapi/jaxrs/StringTest.java | {
"start": 180,
"end": 5669
} | class ____ extends AbstractTest {
// Just String
@Test
public void testJustStringInJaxRsServiceRequest() {
testServiceRequest("/jax-rs/defaultContentType/justString", TEXT_PLAIN, "justString");
}
@Test
public void testJustStringInJaxRsServiceResponse() {
testServiceResponse("/jax-rs/defaultContentType/justString", TEXT_PLAIN, "justString");
}
@Test
public void testJustStringInJaxRsOpenAPIRequest() {
testOpenAPIRequest("/jax-rs/defaultContentType/justString", TEXT_PLAIN);
}
@Test
public void testJustStringInJaxRsOpenAPIResponse() {
testOpenAPIResponse("/jax-rs/defaultContentType/justString", TEXT_PLAIN);
}
// RestResponse<String>
@Test
public void testRestResponseStringInJaxRsServiceRequest() {
testServiceRequest("/jax-rs/defaultContentType/restResponseString", TEXT_PLAIN, "restResponseString");
}
@Test
public void testRestResponseStringInJaxRsServiceResponse() {
testServiceResponse("/jax-rs/defaultContentType/restResponseString", TEXT_PLAIN, "restResponseString");
}
@Test
public void testRestResponseStringInJaxRsOpenAPIRequest() {
testOpenAPIRequest("/jax-rs/defaultContentType/restResponseString", TEXT_PLAIN);
}
@Test
public void testRestResponseStringInJaxRsOpenAPIResponse() {
testOpenAPIResponse("/jax-rs/defaultContentType/restResponseString", TEXT_PLAIN);
}
// Optional<String>
// @Test
public void testOptionalStringInJaxRsServiceRequest() {
testServiceRequest("/jax-rs/defaultContentType/optionalString", TEXT_PLAIN, "optionalString");
}
// @Test
public void testOptionalStringInJaxRsServiceResponse() {
testServiceResponse("/jax-rs/defaultContentType/optionalString", TEXT_PLAIN, "optionalString");
}
@Test
public void testOptionalStringInJaxRsOpenAPIRequest() {
testOpenAPIRequest("/jax-rs/defaultContentType/optionalString", TEXT_PLAIN);
}
@Test
public void testOptionalStringInJaxRsOpenAPIResponse() {
testOpenAPIResponse("/jax-rs/defaultContentType/optionalString", TEXT_PLAIN);
}
// Uni<String>
@Test
public void testUniStringInJaxRsServiceResponse() {
testServiceResponse("/jax-rs/defaultContentType/uniString", TEXT_PLAIN, "uniString");
}
@Test
public void testUniStringInJaxRsOpenAPIResponse() {
testOpenAPIResponse("/jax-rs/defaultContentType/uniString", TEXT_PLAIN);
}
// CompletionStage<String>
@Test
public void testCompletionStageStringInJaxRsServiceResponse() {
testServiceResponse("/jax-rs/defaultContentType/completionStageString", TEXT_PLAIN, "completionStageString");
}
@Test
public void testCompletionStageStringInJaxRsOpenAPIResponse() {
testOpenAPIResponse("/jax-rs/defaultContentType/completionStageString", TEXT_PLAIN);
}
// CompletedFuture<String>
@Test
public void testCompletedFutureStringInJaxRsServiceResponse() {
testServiceResponse("/jax-rs/defaultContentType/completedFutureString", TEXT_PLAIN, "completedFutureString");
}
@Test
public void testCompletedFutureStringInJaxRsOpenAPIResponse() {
testOpenAPIResponse("/jax-rs/defaultContentType/completedFutureString", TEXT_PLAIN);
}
// List<String>
@Test
public void testListStringInJaxRsServiceRequest() {
testServiceRequest("/jax-rs/defaultContentType/listString", APPLICATION_JSON, "[\"listString\"]");
}
@Test
public void testListStringInJaxRsServiceResponse() {
testServiceResponse("/jax-rs/defaultContentType/listString", APPLICATION_JSON, "[\"listString\"]");
}
@Test
public void testListStringInJaxRsOpenAPIRequest() {
testOpenAPIRequest("/jax-rs/defaultContentType/listString", APPLICATION_JSON);
}
@Test
public void testListStringInJaxRsOpenAPIResponse() {
testOpenAPIResponse("/jax-rs/defaultContentType/listString", APPLICATION_JSON);
}
// String[]
@Test
public void testArrayStringInJaxRsServiceRequest() {
testServiceRequest("/jax-rs/defaultContentType/arrayString", APPLICATION_JSON, "[\"arrayString\"]");
}
@Test
public void testArrayStringInJaxRsServiceResponse() {
testServiceResponse("/jax-rs/defaultContentType/arrayString", APPLICATION_JSON, "[\"arrayString\"]");
}
@Test
public void testArrayStringInJaxRsOpenAPIRequest() {
testOpenAPIRequest("/jax-rs/defaultContentType/arrayString", APPLICATION_JSON);
}
@Test
public void testArrayStringInJaxRsOpenAPIResponse() {
testOpenAPIResponse("/jax-rs/defaultContentType/arrayString", APPLICATION_JSON);
}
// Map<String,String>
@Test
public void testMapStringInJaxRsServiceRequest() {
testServiceRequest("/jax-rs/defaultContentType/mapString", APPLICATION_JSON, "{\"mapString\":\"mapString\"}");
}
@Test
public void testMapStringInJaxRsServiceResponse() {
testServiceResponse("/jax-rs/defaultContentType/mapString", APPLICATION_JSON, "{\"mapString\":\"mapString\"}");
}
@Test
public void testMapStringInJaxRsOpenAPIRequest() {
testOpenAPIRequest("/jax-rs/defaultContentType/mapString", APPLICATION_JSON);
}
@Test
public void testMapStringInJaxRsOpenAPIResponse() {
testOpenAPIResponse("/jax-rs/defaultContentType/mapString", APPLICATION_JSON);
}
}
| StringTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/float2darray/Float2DArrayAssert_hasSameDimensionsAs_Test.java | {
"start": 928,
"end": 1336
} | class ____ extends Float2DArrayAssertBaseTest {
@Override
protected Float2DArrayAssert invoke_api_method() {
return assertions.hasSameDimensionsAs(new String[] { "a", "b" });
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertHasSameDimensionsAs(getInfo(assertions), getActual(assertions), new String[] { "a", "b" });
}
}
| Float2DArrayAssert_hasSameDimensionsAs_Test |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/BindableRuntimeHintsRegistrarTests.java | {
"start": 20890,
"end": 21115
} | class ____ {
private @Nullable String field;
public @Nullable String getField() {
return this.field;
}
public void setField(@Nullable String field) {
this.field = field;
}
}
}
public static | Nested |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/logging/structured/CommonStructuredLogFormatTests.java | {
"start": 858,
"end": 1593
} | class ____ {
@Test
void forIdReturnsCommonStructuredLogFormat() {
assertThat(CommonStructuredLogFormat.forId("ecs")).isEqualTo(CommonStructuredLogFormat.ELASTIC_COMMON_SCHEMA);
assertThat(CommonStructuredLogFormat.forId("logstash")).isEqualTo(CommonStructuredLogFormat.LOGSTASH);
}
@Test
void forIdWhenIdIsInDifferentCaseReturnsCommonStructuredLogFormat() {
assertThat(CommonStructuredLogFormat.forId("ECS")).isEqualTo(CommonStructuredLogFormat.ELASTIC_COMMON_SCHEMA);
assertThat(CommonStructuredLogFormat.forId("logSTAsh")).isEqualTo(CommonStructuredLogFormat.LOGSTASH);
}
@Test
void forIdWhenNotKnownReturnsNull() {
assertThat(CommonStructuredLogFormat.forId("madeup")).isNull();
}
}
| CommonStructuredLogFormatTests |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/shortarray/ShortArrayAssert_contains_at_Index_Test.java | {
"start": 1006,
"end": 1410
} | class ____ extends ShortArrayAssertBaseTest {
private final Index index = someIndex();
@Override
protected ShortArrayAssert invoke_api_method() {
return assertions.contains((short) 8, index);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertContains(getInfo(assertions), getActual(assertions), (short) 8, index);
}
}
| ShortArrayAssert_contains_at_Index_Test |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/JmsInOutFixedReplyQueueTimeoutUseMessageIDAsCorrelationIDTest.java | {
"start": 1170,
"end": 2348
} | class ____ extends JmsInOutFixedReplyQueueTimeoutTest {
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:JmsInOutFixedReplyQueueTimeoutTest")
.routeId("route-1")
.to(ExchangePattern.InOut,
"activemq:queue:JmsInOutFixedReplyQueueTimeoutUseMessageIDAsCorrelationIDTest?replyTo=queue:JmsInOutFixedReplyQueueTimeoutUseMessageIDAsCorrelationIDTestReply&useMessageIDAsCorrelationID=true&requestTimeout=2000")
.to("mock:result");
from("activemq:queue:JmsInOutFixedReplyQueueTimeoutUseMessageIDAsCorrelationIDTest")
.routeId("route-2")
.choice().when(body().isEqualTo("World"))
.log("Sleeping for 4 sec to force a timeout")
.delay(Duration.ofSeconds(4).toMillis()).endChoice().end()
.transform(body().prepend("Bye ")).to("log:reply");
}
};
}
}
| JmsInOutFixedReplyQueueTimeoutUseMessageIDAsCorrelationIDTest |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/context/scope/AbstractConcurrentCustomScope.java | {
"start": 1776,
"end": 8444
} | class ____<A extends Annotation> implements CustomScope<A>, LifeCycle<AbstractConcurrentCustomScope<A>>, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(AbstractConcurrentCustomScope.class);
private final Class<A> annotationType;
private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
private final Lock r = rwl.readLock();
private final Lock w = rwl.writeLock();
/**
* A custom scope annotation.
*
* @param annotationType The annotation type
*/
protected AbstractConcurrentCustomScope(Class<A> annotationType) {
this.annotationType = Objects.requireNonNull(annotationType, "Annotation type cannot be null");
}
/**
* @param forCreation Whether it is for creation
* @return Obtains the scope map, never null
* @throws java.lang.IllegalStateException if the scope map cannot be obtained in the current context
*/
@NonNull
protected abstract Map<BeanIdentifier, CreatedBean<?>> getScopeMap(boolean forCreation);
@Override
public final Class<A> annotationType() {
return annotationType;
}
/**
* Implement the close logic for the scope.
*/
@Override
public abstract void close();
@NonNull
@Override
public final AbstractConcurrentCustomScope<A> stop() {
w.lock();
try {
try {
final Map<BeanIdentifier, CreatedBean<?>> scopeMap = getScopeMap(false);
destroyScope(scopeMap);
} catch (IllegalStateException e) {
// scope map not available in current context
}
close();
return this;
} finally {
w.unlock();
}
}
/**
* Destroys the scope.
*
* @param scopeMap The scope map
*/
protected void destroyScope(@Nullable Map<BeanIdentifier, CreatedBean<?>> scopeMap) {
w.lock();
try {
if (CollectionUtils.isNotEmpty(scopeMap)) {
for (CreatedBean<?> createdBean : scopeMap.values()) {
try {
createdBean.close();
} catch (BeanDestructionException e) {
handleDestructionException(e);
}
}
scopeMap.clear();
}
} finally {
w.unlock();
}
}
@SuppressWarnings("unchecked")
@Override
public final <T> T getOrCreate(BeanCreationContext<T> creationContext) {
r.lock();
try {
final Map<BeanIdentifier, CreatedBean<?>> scopeMap = getScopeMap(true);
final BeanIdentifier id = creationContext.id();
CreatedBean<?> createdBean = scopeMap.get(id);
if (createdBean != null) {
return (T) createdBean.bean();
} else {
r.unlock();
w.lock();
try {
// re-check
createdBean = scopeMap.get(id);
if (createdBean != null) {
r.lock();
return (T) createdBean.bean();
} else {
try {
createdBean = doCreate(creationContext);
scopeMap.put(id, createdBean);
} finally {
r.lock();
}
return (T) createdBean.bean();
}
} finally {
w.unlock();
}
}
} finally {
r.unlock();
}
}
/**
* Perform creation.
* @param creationContext The creation context
* @param <T> The generic type
* @return Created bean
*/
@NonNull
protected <T> CreatedBean<T> doCreate(@NonNull BeanCreationContext<T> creationContext) {
return creationContext.create();
}
@Override
public final <T> Optional<T> remove(BeanIdentifier identifier) {
if (identifier == null) {
return Optional.empty();
}
w.lock();
try {
final Map<BeanIdentifier, CreatedBean<?>> scopeMap;
try {
scopeMap = getScopeMap(false);
} catch (IllegalStateException e) {
return Optional.empty();
}
if (CollectionUtils.isNotEmpty(scopeMap)) {
final CreatedBean<?> createdBean = scopeMap.get(identifier);
if (createdBean != null) {
try {
createdBean.close();
} catch (BeanDestructionException e) {
handleDestructionException(e);
}
//noinspection ConstantConditions
return (Optional<T>) Optional.ofNullable(createdBean.bean());
} else {
return Optional.empty();
}
} else {
return Optional.empty();
}
} finally {
w.unlock();
}
}
/**
* Method that can be overridden to customize what happens on a shutdown error.
* @param e The exception
*/
protected void handleDestructionException(BeanDestructionException e) {
LOG.error("Error occurred destroying bean of scope @{}: {}", annotationType.getSimpleName(), e.getMessage(), e);
}
@SuppressWarnings("unchecked")
@Override
public final <T> Optional<BeanRegistration<T>> findBeanRegistration(T bean) {
r.lock();
try {
final Map<BeanIdentifier, CreatedBean<?>> scopeMap;
try {
scopeMap = getScopeMap(false);
} catch (Exception e) {
return Optional.empty();
}
for (CreatedBean<?> createdBean : scopeMap.values()) {
if (createdBean.bean() == bean) {
if (createdBean instanceof BeanRegistration) {
return Optional.of((BeanRegistration<T>) createdBean);
}
return Optional.of(
new BeanRegistration<>(
createdBean.id(),
(BeanDefinition<T>) createdBean.definition(),
bean
)
);
}
}
return Optional.empty();
} finally {
r.unlock();
}
}
}
| AbstractConcurrentCustomScope |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java | {
"start": 4828,
"end": 15476
} | class ____ implements Handler<Long> {
final WeakReference<HttpClientImpl> ref;
private PoolChecker(HttpClientImpl client) {
ref = new WeakReference<>(client);
}
@Override
public void handle(Long event) {
HttpClientImpl client = ref.get();
if (client != null) {
client.checkExpired(this);
}
}
}
protected void checkExpired(Handler<Long> checker) {
synchronized (this) {
if (!closeSequence.started()) {
timerID = vertx.setTimer(poolOptions.getCleanerPeriod(), checker);
}
}
httpCM.checkExpired();
if (endpointResolver != null) {
endpointResolver.checkExpired();
}
}
private Function<EndpointKey, SharedHttpClientConnectionGroup> httpEndpointProvider() {
return (key) -> {
int maxPoolSize = Math.max(poolOptions.getHttp1MaxSize(), poolOptions.getHttp2MaxSize());
ClientMetrics clientMetrics = HttpClientImpl.this.metrics != null ? HttpClientImpl.this.metrics.createEndpointMetrics(key.server, maxPoolSize) : null;
PoolMetrics poolMetrics = HttpClientImpl.this.metrics != null ? vertx.metrics().createPoolMetrics("http", key.server.toString(), maxPoolSize) : null;
ProxyOptions proxyOptions = key.proxyOptions;
if (proxyOptions != null && !key.ssl && proxyOptions.getType() == ProxyType.HTTP) {
SocketAddress server = SocketAddress.inetSocketAddress(proxyOptions.getPort(), proxyOptions.getHost());
key = new EndpointKey(key.ssl, key.sslOptions, proxyOptions, server, key.authority);
proxyOptions = null;
}
HttpConnectParams params = new HttpConnectParams();
params.sslOptions = key.sslOptions;
params.proxyOptions = proxyOptions;
params.ssl = key.ssl;
return new SharedHttpClientConnectionGroup(
clientMetrics,
connectHandler,
contextProvider,
poolMetrics,
poolOptions.getMaxWaitQueueSize(),
poolOptions.getHttp1MaxSize(),
poolOptions.getHttp2MaxSize(),
initialPoolKind,
connector,
params,
key.authority,
maxLifetime,
key.server);
};
}
@Override
protected void doShutdown(Completable<Void> p) {
synchronized (this) {
if (timerID >= 0) {
vertx.cancelTimer(timerID);
timerID = -1;
}
}
httpCM.shutdown();
super.doShutdown(p);
}
@Override
protected void doClose(Completable<Void> p) {
httpCM.close();
super.doClose(p);
}
public Function<HttpClientResponse, Future<RequestOptions>> redirectHandler() {
return redirectHandler;
}
@Override
public Future<io.vertx.core.http.HttpClientConnection> connect(HttpConnectOptions connect) {
Address addr = connect.getServer();
Integer port = connect.getPort();
String host = connect.getHost();
SocketAddress server;
if (addr == null) {
if (port == null) {
port = defaultPort;
}
if (host == null) {
host = defaultHost;
}
server = SocketAddress.inetSocketAddress(port, host);
} else if (addr instanceof SocketAddress) {
server = (SocketAddress) addr;
if (port == null) {
port = connect.getPort();
}
if (host == null) {
host = connect.getHost();
}
if (port == null) {
port = server.port();
}
if (host == null) {
host = server.host();
}
} else {
throw new IllegalArgumentException("Only socket address are currently supported");
}
HostAndPort authority = HostAndPort.create(host, port);
ClientSSLOptions sslOptions = sslOptions(connect);
ProxyOptions proxyOptions = computeProxyOptions(connect.getProxyOptions(), server);
ClientMetrics clientMetrics = metrics != null ? metrics.createEndpointMetrics(server, 1) : null;
Boolean ssl = connect.isSsl();
boolean useSSL = ssl != null ? ssl : defaultSsl;
checkClosed();
HttpConnectParams params = new HttpConnectParams();
params.sslOptions = sslOptions;
params.proxyOptions = proxyOptions;
params.ssl = useSSL;
return (Future) connector.httpConnect(vertx.getOrCreateContext(), server, authority, params, 0L, clientMetrics).map(conn -> new UnpooledHttpClientConnection(conn).init());
}
@Override
public Future<HttpClientRequest> request(RequestOptions request) {
Address addr = request.getServer();
Integer port = request.getPort();
String host = request.getHost();
if (addr == null) {
if (port == null) {
port = defaultPort;
}
if (host == null) {
host = defaultHost;
}
addr = SocketAddress.inetSocketAddress(port, host);
} else if (addr instanceof SocketAddress) {
SocketAddress socketAddr = (SocketAddress) addr;
if (port == null) {
port = request.getPort();
}
if (host == null) {
host = request.getHost();
}
if (port == null) {
port = socketAddr.port();
}
if (host == null) {
host = socketAddr.host();
}
}
return doRequest(addr, port, host, request);
}
private Future<HttpClientRequest> doRequest(Address server, Integer port, String host, RequestOptions request) {
if (server == null) {
throw new NullPointerException();
}
HttpMethod method = request.getMethod();
String requestURI = request.getURI();
Boolean ssl = request.isSsl();
MultiMap headers = request.getHeaders();
long connectTimeout = 0L;
long idleTimeout = 0L;
if (request.getTimeout() >= 0L) {
connectTimeout = request.getTimeout();
idleTimeout = request.getTimeout();
}
if (request.getConnectTimeout() >= 0L) {
connectTimeout = request.getConnectTimeout();
}
if (request.getIdleTimeout() >= 0L) {
idleTimeout = request.getIdleTimeout();
}
Boolean followRedirects = request.getFollowRedirects();
Objects.requireNonNull(method, "no null method accepted");
Objects.requireNonNull(requestURI, "no null requestURI accepted");
boolean useSSL = ssl != null ? ssl : defaultSsl;
checkClosed();
HostAndPort authority;
// should we do that here ? it might create issues with address resolver that resolves this later
if (host != null && port != null) {
String peerHost = host;
// if (peerHost.endsWith(".")) {
// peerHost = peerHost.substring(0, peerHost.length() - 1);
// }
authority = HostAndPort.create(peerHost, port);
} else {
authority = null;
}
ClientSSLOptions sslOptions = sslOptions(request);
return doRequest(request.getRoutingKey(), method, authority, server, useSSL, requestURI, headers, request.getTraceOperation(), connectTimeout, idleTimeout, followRedirects, sslOptions, request.getProxyOptions());
}
private Future<HttpClientRequest> doRequest(
String routingKey,
HttpMethod method,
HostAndPort authority,
Address server,
boolean useSSL,
String requestURI,
MultiMap headers,
String traceOperation,
long connectTimeout,
long idleTimeout,
Boolean followRedirects,
ClientSSLOptions sslOptions,
ProxyOptions proxyConfig) {
ContextInternal streamCtx = vertx.getOrCreateContext();
Future<ConnectionObtainedResult> future;
if (endpointResolver != null) {
PromiseInternal<Endpoint> promise = vertx.promise();
endpointResolver.lookupEndpoint(server, promise);
future = promise.future()
.map(endpoint -> endpoint.selectServer(routingKey))
.compose(lookup -> {
SocketAddress address = lookup.address();
ProxyOptions proxyOptions = computeProxyOptions(proxyConfig, address);
EndpointKey key = new EndpointKey(useSSL, sslOptions, proxyOptions, address, authority != null ? authority : HostAndPort.create(address.host(), address.port()));
return httpCM.withResourceAsync(key, httpEndpointProvider(), (endpoint, created) -> {
Future<Lease<HttpClientConnection>> fut2 = endpoint.requestConnection(streamCtx, connectTimeout);
if (fut2 == null) {
return null;
} else {
ServerInteraction endpointRequest = lookup.newInteraction();
return fut2.andThen(ar -> {
if (ar.failed()) {
endpointRequest.reportFailure(ar.cause());
}
}).compose(lease -> {
HttpClientConnection conn = lease.get();
return conn.createStream(streamCtx).map(stream -> {
HttpClientStream wrapped = new StatisticsGatheringHttpClientStream(stream, endpointRequest);
wrapped.closeHandler(v -> lease.recycle());
return new ConnectionObtainedResult(proxyOptions, wrapped, lease);
});
});
}
});
});
} else if (server instanceof SocketAddress) {
ProxyOptions proxyOptions = computeProxyOptions(proxyConfig, (SocketAddress) server);
EndpointKey key = new EndpointKey(useSSL, sslOptions, proxyOptions, (SocketAddress) server, authority);
future = httpCM.withResourceAsync(key, httpEndpointProvider(), (endpoint, created) -> {
Future<Lease<HttpClientConnection>> fut = endpoint.requestConnection(streamCtx, connectTimeout);
if (fut == null) {
return null;
} else {
return fut.compose(lease -> {
HttpClientConnection conn = lease.get();
return conn.createStream(streamCtx).map(stream -> {
stream.closeHandler(v -> {
lease.recycle();
});
return new ConnectionObtainedResult(proxyOptions, stream, lease);
});
});
}
});
} else {
future = streamCtx.failedFuture("Cannot resolve address " + server);
}
if (future == null) {
return streamCtx.failedFuture("Cannot resolve address " + server);
} else {
return future.map(res -> {
RequestOptions options = new RequestOptions();
options.setMethod(method);
options.setHeaders(headers);
options.setURI(requestURI);
options.setProxyOptions(res.proxyOptions);
options.setIdleTimeout(idleTimeout);
options.setFollowRedirects(followRedirects);
options.setTraceOperation(traceOperation);
HttpClientStream stream = res.stream;
HttpClientRequestImpl request = createRequest(stream.connection(), stream, options);
stream.closeHandler(v -> {
res.lease.recycle();
request.handleClosed();
});
return request;
});
}
}
private static | PoolChecker |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/state/CheckpointStorageLoaderTest.java | {
"start": 24057,
"end": 24805
} | class ____ extends TypeSafeMatcher<Path> {
private final Path reNormalizedExpected;
private NormalizedPathMatcher(Path expected) {
this.reNormalizedExpected = expected == null ? null : new Path(expected.toString());
}
@Override
protected boolean matchesSafely(Path actual) {
if (reNormalizedExpected == null) {
return actual == null;
}
Path reNormalizedActual = new Path(actual.toString());
return reNormalizedExpected.equals(reNormalizedActual);
}
@Override
public void describeTo(Description description) {
description.appendValue(reNormalizedExpected);
}
}
}
| NormalizedPathMatcher |
java | junit-team__junit5 | junit-platform-suite-api/src/main/java/org/junit/platform/suite/api/SuiteDisplayName.java | {
"start": 1264,
"end": 1463
} | interface ____ {
/**
* Custom display name for the annotated class.
*
* @return a custom display name; never blank or consisting solely of
* whitespace
*/
String value();
}
| SuiteDisplayName |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/tasks/TaskManager.java | {
"start": 24506,
"end": 31279
} | class ____ {
private final CancellableTask task;
private boolean finished = false;
private List<Runnable> cancellationListeners = null;
private Map<Transport.Connection, Integer> childTasksPerConnection = null;
private String banChildrenReason;
private List<Runnable> childTaskCompletedListeners = null;
CancellableTaskHolder(CancellableTask task) {
this.task = task;
}
void cancel(String reason, Runnable listener) {
final Runnable toRun;
synchronized (this) {
if (finished) {
assert cancellationListeners == null;
toRun = listener;
} else {
toRun = () -> {};
if (listener != null) {
if (cancellationListeners == null) {
cancellationListeners = new ArrayList<>();
}
cancellationListeners.add(listener);
}
}
}
try {
task.cancel(reason);
} finally {
if (toRun != null) {
toRun.run();
}
}
}
void cancel(String reason) {
task.cancel(reason);
}
/**
* Marks task as finished.
*/
public void finish() {
final List<Runnable> listeners;
synchronized (this) {
this.finished = true;
if (cancellationListeners == null) {
return;
}
listeners = cancellationListeners;
cancellationListeners = null;
}
// We need to call the listener outside of the synchronised section to avoid potential bottle necks
// in the listener synchronization
notifyListeners(listeners);
}
private void notifyListeners(List<Runnable> listeners) {
assert Thread.holdsLock(this) == false;
Exception rootException = null;
for (Runnable listener : listeners) {
try {
listener.run();
} catch (RuntimeException inner) {
rootException = ExceptionsHelper.useOrSuppress(rootException, inner);
}
}
ExceptionsHelper.reThrowIfNotNull(rootException);
}
public CancellableTask getTask() {
return task;
}
synchronized void registerChildConnection(Transport.Connection connection) {
if (banChildrenReason != null) {
throw new TaskCancelledException("parent task was cancelled [" + banChildrenReason + ']');
}
if (childTasksPerConnection == null) {
childTasksPerConnection = new HashMap<>();
}
childTasksPerConnection.merge(connection, 1, Integer::sum);
}
void unregisterChildConnection(Transport.Connection node) {
final List<Runnable> listeners;
synchronized (this) {
if (childTasksPerConnection.merge(node, -1, Integer::sum) == 0) {
childTasksPerConnection.remove(node);
}
if (childTasksPerConnection.isEmpty() == false || this.childTaskCompletedListeners == null) {
return;
}
listeners = childTaskCompletedListeners;
childTaskCompletedListeners = null;
}
notifyListeners(listeners);
}
Set<Transport.Connection> startBan(String reason, Runnable onChildTasksCompleted) {
final Set<Transport.Connection> pendingChildConnections;
final Runnable toRun;
synchronized (this) {
assert reason != null;
// noinspection ConstantConditions just in case we get a null value with assertions disabled
banChildrenReason = reason == null ? "none" : reason;
if (childTasksPerConnection == null) {
pendingChildConnections = Collections.emptySet();
} else {
pendingChildConnections = Set.copyOf(childTasksPerConnection.keySet());
}
if (pendingChildConnections.isEmpty()) {
assert childTaskCompletedListeners == null;
toRun = onChildTasksCompleted;
} else {
toRun = () -> {};
if (childTaskCompletedListeners == null) {
childTaskCompletedListeners = new ArrayList<>();
}
childTaskCompletedListeners.add(onChildTasksCompleted);
}
}
toRun.run();
return pendingChildConnections;
}
}
/**
* Start tracking a cancellable task with its tcp channel, so if the channel gets closed we can get a set of
* pending tasks associated that channel and cancel them as these results won't be retrieved by the parent task.
*
* @return a releasable that should be called when this pending task is completed
*/
public Releasable startTrackingCancellableChannelTask(TcpChannel channel, CancellableTask task) {
assert cancellableTasks.get(task.getId()) != null : "task [" + task.getId() + "] is not registered yet";
final ChannelPendingTaskTracker tracker = startTrackingChannel(channel, trackerChannel -> trackerChannel.addTask(task));
return () -> tracker.removeTask(task);
}
private ChannelPendingTaskTracker startTrackingChannel(TcpChannel channel, Consumer<ChannelPendingTaskTracker> onRegister) {
final ChannelPendingTaskTracker tracker = channelPendingTaskTrackers.compute(channel, (k, curr) -> {
if (curr == null) {
curr = new ChannelPendingTaskTracker();
}
onRegister.accept(curr);
return curr;
});
if (tracker.registered.compareAndSet(false, true)) {
channel.addCloseListener(ActionListener.running(() -> {
final ChannelPendingTaskTracker removedTracker = channelPendingTaskTrackers.remove(channel);
assert removedTracker == tracker;
onChannelClosed(tracker);
}));
}
return tracker;
}
// for testing
final int numberOfChannelPendingTaskTrackers() {
return channelPendingTaskTrackers.size();
}
private static final ChannelPendingTaskTracker DIRECT_CHANNEL_TRACKER = new ChannelPendingTaskTracker();
private static | CancellableTaskHolder |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/builder/BuilderTest_error.java | {
"start": 233,
"end": 616
} | class ____ extends TestCase {
public void test_0() throws Exception {
Exception error = null;
try {
JSON.parseObject("{\"id\":12304,\"name\":\"ljw\"}", VO.class);
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
@JSONType(builder = VOBuilder.class)
public static | BuilderTest_error |
java | spring-projects__spring-boot | module/spring-boot-micrometer-metrics/src/main/java/org/springframework/boot/micrometer/metrics/actuate/endpoint/MetricsEndpoint.java | {
"start": 5995,
"end": 6293
} | class ____ implements OperationResponseBody {
private final Set<String> names;
MetricNamesDescriptor(Set<String> names) {
this.names = names;
}
public Set<String> getNames() {
return this.names;
}
}
/**
* Description of a metric.
*/
public static final | MetricNamesDescriptor |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java | {
"start": 3371,
"end": 3915
} | class ____ {
static final String TASKS = "tasks";
static final String EXECUTING = "executing";
static final String INSERT_ORDER = "insert_order";
static final String PRIORITY = "priority";
static final String SOURCE = "source";
static final String TIME_IN_QUEUE_MILLIS = "time_in_queue_millis";
static final String TIME_IN_QUEUE = "time_in_queue";
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeCollection(pendingTasks);
}
}
| Fields |
java | apache__rocketmq | common/src/main/java/org/apache/rocketmq/common/statistics/StatisticsManager.java | {
"start": 1170,
"end": 5759
} | class ____ {
/**
* Set of Statistics Kind Metadata
*/
private Map<String, StatisticsKindMeta> kindMetaMap;
/**
* item names to calculate statistics brief
*/
private Pair<String, long[][]>[] briefMetas;
/**
* Statistics
*/
private final ConcurrentHashMap<String, ConcurrentHashMap<String, StatisticsItem>> statsTable
= new ConcurrentHashMap<>();
private static final int MAX_IDLE_TIME = 10 * 60 * 1000;
private final ScheduledExecutorService executor = ThreadUtils.newSingleThreadScheduledExecutor(
"StatisticsManagerCleaner", true);
private StatisticsItemStateGetter statisticsItemStateGetter;
public StatisticsManager() {
kindMetaMap = new HashMap<>();
start();
}
public StatisticsManager(Map<String, StatisticsKindMeta> kindMeta) {
this.kindMetaMap = kindMeta;
start();
}
public void addStatisticsKindMeta(StatisticsKindMeta kindMeta) {
kindMetaMap.put(kindMeta.getName(), kindMeta);
statsTable.putIfAbsent(kindMeta.getName(), new ConcurrentHashMap<>(16));
}
public void setBriefMeta(Pair<String, long[][]>[] briefMetas) {
this.briefMetas = briefMetas;
}
private void start() {
int maxIdleTime = MAX_IDLE_TIME;
executor.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
Iterator<Map.Entry<String, ConcurrentHashMap<String, StatisticsItem>>> iter
= statsTable.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<String, ConcurrentHashMap<String, StatisticsItem>> entry = iter.next();
String kind = entry.getKey();
ConcurrentHashMap<String, StatisticsItem> itemMap = entry.getValue();
if (itemMap == null || itemMap.isEmpty()) {
continue;
}
HashMap<String, StatisticsItem> tmpItemMap = new HashMap<>(itemMap);
for (StatisticsItem item : tmpItemMap.values()) {
// remove when expired
if (System.currentTimeMillis() - item.getLastTimeStamp().get() > MAX_IDLE_TIME
&& (statisticsItemStateGetter == null || !statisticsItemStateGetter.online(item))) {
remove(item);
}
}
}
}
}, maxIdleTime, maxIdleTime / 3, TimeUnit.MILLISECONDS);
}
/**
* Increment a StatisticsItem
*
* @param kind
* @param key
* @param itemAccumulates
*/
public boolean inc(String kind, String key, long... itemAccumulates) {
ConcurrentHashMap<String, StatisticsItem> itemMap = statsTable.get(kind);
if (itemMap != null) {
StatisticsItem item = itemMap.get(key);
// if not exist, create and schedule
if (item == null) {
item = new StatisticsItem(kind, key, kindMetaMap.get(kind).getItemNames());
item.setInterceptor(new StatisticsBriefInterceptor(item, briefMetas));
StatisticsItem oldItem = itemMap.putIfAbsent(key, item);
if (oldItem != null) {
item = oldItem;
} else {
scheduleStatisticsItem(item);
}
}
// do increment
item.incItems(itemAccumulates);
return true;
}
return false;
}
private void scheduleStatisticsItem(StatisticsItem item) {
kindMetaMap.get(item.getStatKind()).getScheduledPrinter().schedule(item);
}
public void remove(StatisticsItem item) {
ConcurrentHashMap<String, StatisticsItem> itemMap = statsTable.get(item.getStatKind());
if (itemMap != null) {
itemMap.remove(item.getStatObject(), item);
}
StatisticsKindMeta kindMeta = kindMetaMap.get(item.getStatKind());
if (kindMeta != null && kindMeta.getScheduledPrinter() != null) {
kindMeta.getScheduledPrinter().remove(item);
}
}
public StatisticsItemStateGetter getStatisticsItemStateGetter() {
return statisticsItemStateGetter;
}
public void setStatisticsItemStateGetter(StatisticsItemStateGetter statisticsItemStateGetter) {
this.statisticsItemStateGetter = statisticsItemStateGetter;
}
public void shutdown() {
executor.shutdown();
}
}
| StatisticsManager |
java | apache__spark | common/sketch/src/main/java/org/apache/spark/util/sketch/Utils.java | {
"start": 884,
"end": 1541
} | class ____ {
public static byte[] getBytesFromUTF8String(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static long integralToLong(Object i) {
long longValue;
if (i instanceof Long longVal) {
longValue = longVal;
} else if (i instanceof Integer integer) {
longValue = integer.longValue();
} else if (i instanceof Short shortVal) {
longValue = shortVal.longValue();
} else if (i instanceof Byte byteVal) {
longValue = byteVal.longValue();
} else {
throw new IllegalArgumentException("Unsupported data type " + i.getClass().getName());
}
return longValue;
}
}
| Utils |
java | quarkusio__quarkus | integration-tests/maven/src/test/resources-filtered/projects/apt-in-annotation-processor-paths/src/main/java/org/acme/HelloResource.java | {
"start": 164,
"end": 329
} | class ____ {
@GET
@Produces(MediaType.TEXT_PLAIN)
public String hello() {
return MyEntity_.FIELD+"/"+QMyEntity.myEntity.field;
}
}
| HelloResource |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/iterable/IterableAssert_hasExactlyElementsOfTypes_Test.java | {
"start": 957,
"end": 1389
} | class ____ extends ObjectArrayAssertBaseTest {
@Override
protected ObjectArrayAssert<Object> invoke_api_method() {
return assertions.hasExactlyElementsOfTypes(Integer.class, Double.class);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertHasExactlyElementsOfTypes(getInfo(assertions), getActual(assertions), Integer.class, Double.class);
}
}
| IterableAssert_hasExactlyElementsOfTypes_Test |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/operations/utils/OperationExpressionsUtils.java | {
"start": 5766,
"end": 7073
} | class ____
extends ApiExpressionDefaultVisitor<Void> {
private int uniqueId = 0;
private final Map<Expression, String> aggregates = new LinkedHashMap<>();
private final Map<Expression, String> properties = new LinkedHashMap<>();
@Override
public Void visit(LookupCallExpression unresolvedCall) {
throw new IllegalStateException(
"All lookup calls should be resolved by now. Got: " + unresolvedCall);
}
@Override
public Void visit(UnresolvedCallExpression unresolvedCall) {
FunctionDefinition functionDefinition = unresolvedCall.getFunctionDefinition();
if (isFunctionOfKind(unresolvedCall, AGGREGATE)) {
aggregates.computeIfAbsent(unresolvedCall, expr -> "EXPR$" + uniqueId++);
} else if (WINDOW_PROPERTIES.contains(functionDefinition)) {
properties.computeIfAbsent(unresolvedCall, expr -> "EXPR$" + uniqueId++);
} else {
unresolvedCall.getChildren().forEach(c -> c.accept(this));
}
return null;
}
@Override
protected Void defaultMethod(Expression expression) {
return null;
}
}
private static | AggregationAndPropertiesSplitter |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/annotation/web/builders/HttpSecurity.java | {
"start": 56310,
"end": 58583
} | class ____ {
*
* @Bean
* public SecurityFilterChain securityFilterChain(HttpSecurity http) {
* http
* .authorizeHttpRequests((authorizeHttpRequests) ->
* authorizeHttpRequests
* .anyRequest().authenticated()
* )
* .oauth2ResourceServer((oauth2ResourceServer) ->
* oauth2ResourceServer
* .jwt((jwt) ->
* jwt
* .decoder(jwtDecoder())
* )
* );
* return http.build();
* }
*
* @Bean
* public JwtDecoder jwtDecoder() {
* return NimbusJwtDecoder.withPublicKey(this.key).build();
* }
* }
* </pre>
* @param oauth2ResourceServerCustomizer the {@link Customizer} to provide more
* options for the {@link OAuth2ResourceServerConfigurer}
* @return the {@link HttpSecurity} for further customizations
* @ @see
* <a target="_blank" href= "https://tools.ietf.org/html/rfc6749#section-1.1">OAuth
* 2.0 Authorization Framework</a>
*/
public HttpSecurity oauth2ResourceServer(
Customizer<OAuth2ResourceServerConfigurer<HttpSecurity>> oauth2ResourceServerCustomizer) {
OAuth2ResourceServerConfigurer<HttpSecurity> configurer = getOrApply(
new OAuth2ResourceServerConfigurer<>(getContext()));
this.postProcess(configurer);
oauth2ResourceServerCustomizer.customize(configurer);
return HttpSecurity.this;
}
/**
* Configures OAuth 2.1 Authorization Server support.
* @param oauth2AuthorizationServerCustomizer the {@link Customizer} providing access
* to the {@link OAuth2AuthorizationServerConfigurer} for further customizations
* @return the {@link HttpSecurity} for further customizations
* @ @since 7.0
* @see <a target="_blank" href=
* "https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-13.html">OAuth 2.1
* Authorization Framework</a>
*/
public HttpSecurity oauth2AuthorizationServer(
Customizer<OAuth2AuthorizationServerConfigurer> oauth2AuthorizationServerCustomizer) {
oauth2AuthorizationServerCustomizer.customize(getOrApply(new OAuth2AuthorizationServerConfigurer()));
return HttpSecurity.this;
}
/**
* Configures One-Time Token Login Support.
*
* <h2>Example Configuration</h2>
*
* <pre>
* @Configuration
* @EnableWebSecurity
* public | OAuth2ResourceServerSecurityConfig |
java | google__dagger | javatests/artifacts/hilt-android/simple/uitest/src/main/java/dagger/hilt/android/simple/uitest/UITestOne.java | {
"start": 1064,
"end": 1120
} | class ____ {
@Test
public void emptyTest() {}
} | UITestOne |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/image/publisher/SnapshotGenerator.java | {
"start": 1547,
"end": 3748
} | class ____ {
private final Emitter emitter;
private int nodeId = 0;
private Time time = Time.SYSTEM;
private FaultHandler faultHandler = (m, e) -> null;
private long maxBytesSinceLastSnapshot = 100 * 1024L * 1024L;
private long maxTimeSinceLastSnapshotNs = TimeUnit.DAYS.toNanos(1);
private AtomicReference<String> disabledReason = null;
private String threadNamePrefix = "";
public Builder(Emitter emitter) {
this.emitter = emitter;
}
public Builder setNodeId(int nodeId) {
this.nodeId = nodeId;
return this;
}
public Builder setTime(Time time) {
this.time = time;
return this;
}
public Builder setFaultHandler(FaultHandler faultHandler) {
this.faultHandler = faultHandler;
return this;
}
public Builder setMaxBytesSinceLastSnapshot(long maxBytesSinceLastSnapshot) {
this.maxBytesSinceLastSnapshot = maxBytesSinceLastSnapshot;
return this;
}
public Builder setMaxTimeSinceLastSnapshotNs(long maxTimeSinceLastSnapshotNs) {
this.maxTimeSinceLastSnapshotNs = maxTimeSinceLastSnapshotNs;
return this;
}
public Builder setDisabledReason(AtomicReference<String> disabledReason) {
this.disabledReason = disabledReason;
return this;
}
public Builder setThreadNamePrefix(String threadNamePrefix) {
this.threadNamePrefix = threadNamePrefix;
return this;
}
public SnapshotGenerator build() {
if (disabledReason == null) {
disabledReason = new AtomicReference<>();
}
return new SnapshotGenerator(
nodeId,
time,
emitter,
faultHandler,
maxBytesSinceLastSnapshot,
maxTimeSinceLastSnapshotNs,
disabledReason,
threadNamePrefix
);
}
}
/**
* The callback which actually generates the snapshot.
*/
public | Builder |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java | {
"start": 2443,
"end": 10563
} | class ____ {
private static final String SECRET_STR = "secret";
private static final String HTTP_USER = "HTTP";
private static final String PREFIX = "hadoop.http.authentication.";
private static final long TIMEOUT = 20000;
private static File httpSpnegoKeytabFile = new File(
KerberosTestUtils.getKeytabFile());
private static String httpSpnegoPrincipal =
KerberosTestUtils.getServerPrincipal();
private static String realm = KerberosTestUtils.getRealm();
private static File testRootDir = new File("target",
TestHttpServerWithSpnego.class.getName() + "-root");
private static MiniKdc testMiniKDC;
private static File secretFile = new File(testRootDir, SECRET_STR);
@BeforeAll
public static void setUp() throws Exception {
try {
testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
testMiniKDC.start();
testMiniKDC.createPrincipal(
httpSpnegoKeytabFile, HTTP_USER + "/localhost");
} catch (Exception e) {
assertTrue(false, "Couldn't setup MiniKDC");
}
Writer w = new FileWriter(secretFile);
w.write("secret");
w.close();
}
@AfterAll
public static void tearDown() {
if (testMiniKDC != null) {
testMiniKDC.stop();
}
}
/**
* groupA
* - userA
* groupB
* - userA, userB
* groupC
* - userC
* SPNEGO filter has been enabled.
* userA has the privilege to impersonate users in groupB.
* userA has admin access to all default servlets, but userB
* and userC don't have. So "/logs" can only be accessed by userA.
* @throws Exception
*/
@Test
public void testAuthenticationWithProxyUser() throws Exception {
Configuration spnegoConf = getSpnegoConf(new Configuration());
spnegoConf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
ProxyUserAuthenticationFilterInitializer.class.getName());
//setup logs dir
System.setProperty("hadoop.log.dir", testRootDir.getAbsolutePath());
// Setup user group
UserGroupInformation.createUserForTesting("userA",
new String[]{"groupA", "groupB"});
UserGroupInformation.createUserForTesting("userB",
new String[]{"groupB"});
UserGroupInformation.createUserForTesting("userC",
new String[]{"groupC"});
// Make userA impersonate users in groupB
spnegoConf.set("hadoop.proxyuser.userA.hosts", "*");
spnegoConf.set("hadoop.proxyuser.userA.groups", "groupB");
ProxyUsers.refreshSuperUserGroupsConfiguration(spnegoConf);
HttpServer2 httpServer = null;
try {
// Create http server to test.
httpServer = getCommonBuilder()
.setConf(spnegoConf)
.setACL(new AccessControlList("userA groupA"))
.build();
httpServer.start();
// Get signer to encrypt token
Signer signer = getSignerToEncrypt();
// setup auth token for userA
AuthenticatedURL.Token token = getEncryptedAuthToken(signer, "userA");
String serverURL = "http://" +
NetUtils.getHostPortString(httpServer.getConnectorAddress(0)) + "/";
// The default authenticator is kerberos.
AuthenticatedURL authUrl = new AuthenticatedURL();
// userA impersonates userB, it's allowed.
for (String servlet :
new String[]{"stacks", "jmx", "conf"}) {
HttpURLConnection conn = authUrl
.openConnection(new URL(serverURL + servlet + "?doAs=userB"),
token);
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
}
// userA cannot impersonate userC, it fails.
for (String servlet :
new String[]{"stacks", "jmx", "conf"}){
HttpURLConnection conn = authUrl
.openConnection(new URL(serverURL + servlet + "?doAs=userC"),
token);
assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
conn.getResponseCode());
}
// "/logs" and "/logLevel" require admin authorization,
// only userA has the access.
for (String servlet :
new String[]{"logLevel", "logs"}) {
HttpURLConnection conn = authUrl
.openConnection(new URL(serverURL + servlet), token);
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
}
// Setup token for userB
token = getEncryptedAuthToken(signer, "userB");
// userB cannot access these servlets.
for (String servlet :
new String[]{"logLevel", "logs"}) {
HttpURLConnection conn = authUrl
.openConnection(new URL(serverURL + servlet), token);
assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
conn.getResponseCode());
}
} finally {
if (httpServer != null) {
httpServer.stop();
}
}
}
@Test
public void testAuthenticationToAllowList() throws Exception {
Configuration spnegoConf = getSpnegoConf(new Configuration());
String[] allowList = new String[] {"/jmx", "/prom"};
String[] denyList = new String[] {"/conf", "/stacks", "/logLevel"};
spnegoConf.set(PREFIX + "kerberos.endpoint.whitelist", String.join(",", allowList));
spnegoConf.set(CommonConfigurationKeysPublic.HADOOP_PROMETHEUS_ENABLED, "true");
spnegoConf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
AuthenticationFilterInitializer.class.getName());
//setup logs dir
System.setProperty("hadoop.log.dir", testRootDir.getAbsolutePath());
HttpServer2 httpServer = null;
try {
// Create http server to test.
httpServer = getCommonBuilder().setConf(spnegoConf).setSecurityEnabled(true)
.setUsernameConfKey(PREFIX + "kerberos.principal")
.setKeytabConfKey(PREFIX + "kerberos.keytab").build();
httpServer.start();
String serverURL = "http://" + NetUtils.getHostPortString(httpServer.getConnectorAddress(0));
// endpoints in whitelist should not require Kerberos authentication
for (String endpoint : allowList) {
HttpURLConnection conn = (HttpURLConnection) new URL(serverURL + endpoint).openConnection();
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
}
// endpoints not in whitelist should require Kerberos authentication
for (String endpoint : denyList) {
HttpURLConnection conn = (HttpURLConnection) new URL(serverURL + endpoint).openConnection();
assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
}
} finally {
if (httpServer != null) {
httpServer.stop();
}
}
}
private AuthenticatedURL.Token getEncryptedAuthToken(Signer signer,
String user) throws Exception {
AuthenticationToken token =
new AuthenticationToken(user, user, "kerberos");
token.setExpires(System.currentTimeMillis() + TIMEOUT);
return new AuthenticatedURL.Token(signer.sign(token.toString()));
}
private Signer getSignerToEncrypt() throws Exception {
SignerSecretProvider secretProvider =
StringSignerSecretProviderCreator.newStringSignerSecretProvider();
Properties secretProviderProps = new Properties();
secretProviderProps.setProperty(
AuthenticationFilter.SIGNATURE_SECRET, SECRET_STR);
secretProvider.init(secretProviderProps, null, TIMEOUT);
return new Signer(secretProvider);
}
private Configuration getSpnegoConf(Configuration conf) {
conf = new Configuration();
conf.set(PREFIX + "type", "kerberos");
conf.setBoolean(PREFIX + "simple.anonymous.allowed", false);
conf.set(PREFIX + "signature.secret.file",
secretFile.getAbsolutePath());
conf.set(PREFIX + "kerberos.keytab",
httpSpnegoKeytabFile.getAbsolutePath());
conf.set(PREFIX + "kerberos.principal", httpSpnegoPrincipal);
conf.set(PREFIX + "cookie.domain", realm);
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
true);
return conf;
}
private HttpServer2.Builder getCommonBuilder() throws Exception {
return new HttpServer2.Builder().setName("test")
.addEndpoint(new URI("http://localhost:0"))
.setFindPort(true);
}
} | TestHttpServerWithSpnego |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit4/RepeatedSpringRunnerTests.java | {
"start": 3975,
"end": 4192
} | class ____ extends AbstractRepeatedTestCase {
@Test
@Repeat
@Timed(millis = 10000)
public void defaultRepeatValue() {
incrementInvocationCount();
}
}
public static final | DefaultRepeatValueRepeatedTestCase |
java | apache__camel | components/camel-jms/src/main/java/org/apache/camel/component/jms/reply/UseMessageIdAsCorrelationIdMessageSentCallback.java | {
"start": 1365,
"end": 2213
} | class ____ implements MessageSentCallback {
private final ReplyManager replyManager;
private final String correlationId;
private final long requestTimeout;
public UseMessageIdAsCorrelationIdMessageSentCallback(ReplyManager replyManager, String correlationId,
long requestTimeout) {
this.replyManager = replyManager;
this.correlationId = correlationId;
this.requestTimeout = requestTimeout;
}
@Override
public void sent(Session session, Message message, Destination destination) {
String newCorrelationID = getJMSMessageID(message);
if (newCorrelationID != null) {
replyManager.updateCorrelationId(correlationId, newCorrelationID, requestTimeout);
}
}
}
| UseMessageIdAsCorrelationIdMessageSentCallback |
java | apache__rocketmq | test/src/test/java/org/apache/rocketmq/test/client/consumer/broadcast/tag/BroadcastTwoConsumerSubTagIT.java | {
"start": 1521,
"end": 3189
} | class ____ extends BaseBroadcast {
private static Logger logger = LoggerFactory.getLogger(BroadcastTwoConsumerSubTagIT.class);
private RMQNormalProducer producer = null;
private String topic = null;
@Before
public void setUp() {
topic = initTopic();
logger.info(String.format("use topic: %s;", topic));
producer = getProducer(NAMESRV_ADDR, topic);
}
@After
public void tearDown() {
super.shutdown();
}
@Test
public void testTwoConsumerSubTag() {
int msgSize = 20;
String tag = "jueyin_tag";
RMQBroadCastConsumer consumer1 = getBroadCastConsumer(NAMESRV_ADDR, topic, tag,
new RMQNormalListener());
RMQBroadCastConsumer consumer2 = getBroadCastConsumer(NAMESRV_ADDR,
consumer1.getConsumerGroup(), topic, tag, new RMQNormalListener());
TestUtils.waitForSeconds(WAIT_TIME);
producer.send(tag, msgSize);
Assert.assertEquals("Not all sent succeeded", msgSize, producer.getAllUndupMsgBody().size());
consumer1.getListener().waitForMessageConsume(producer.getAllMsgBody(), CONSUME_TIME);
consumer2.getListener().waitForMessageConsume(producer.getAllMsgBody(), CONSUME_TIME);
assertThat(VerifyUtils.getFilterdMessage(producer.getAllMsgBody(),
consumer1.getListener().getAllMsgBody()))
.containsExactlyElementsIn(producer.getAllMsgBody());
assertThat(VerifyUtils.getFilterdMessage(producer.getAllMsgBody(),
consumer2.getListener().getAllMsgBody()))
.containsExactlyElementsIn(producer.getAllMsgBody());
}
}
| BroadcastTwoConsumerSubTagIT |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/utils/UrlUtilsTest.java | {
"start": 1568,
"end": 23988
} | class ____ {
String localAddress = "127.0.0.1";
@Test
void testAddressNull() {
String exceptionMessage = "Address is not allowed to be empty, please re-enter.";
try {
UrlUtils.parseURL(null, null);
} catch (IllegalArgumentException illegalArgumentException) {
assertEquals(exceptionMessage, illegalArgumentException.getMessage());
}
}
@Test
void testParseUrl() {
String address = "remote://root:alibaba@127.0.0.1:9090/dubbo.test.api";
URL url = UrlUtils.parseURL(address, null);
assertEquals(localAddress + ":9090", url.getAddress());
assertEquals("root", url.getUsername());
assertEquals("alibaba", url.getPassword());
assertEquals("dubbo.test.api", url.getPath());
assertEquals(9090, url.getPort());
assertEquals("remote", url.getProtocol());
}
@Test
void testParseURLWithSpecial() {
String address = "127.0.0.1:2181?backup=127.0.0.1:2182,127.0.0.1:2183";
assertEquals("dubbo://" + address, UrlUtils.parseURL(address, null).toString());
}
@Test
void testDefaultUrl() {
String address = "127.0.0.1";
URL url = UrlUtils.parseURL(address, null);
assertEquals(localAddress + ":9090", url.getAddress());
assertEquals(9090, url.getPort());
assertEquals("dubbo", url.getProtocol());
assertNull(url.getUsername());
assertNull(url.getPassword());
assertNull(url.getPath());
}
@Test
void testParseFromParameter() {
String address = "127.0.0.1";
Map<String, String> parameters = new HashMap<String, String>();
parameters.put("username", "root");
parameters.put("password", "alibaba");
parameters.put("port", "10000");
parameters.put("protocol", "dubbo");
parameters.put("path", "dubbo.test.api");
parameters.put("aaa", "bbb");
parameters.put("ccc", "ddd");
URL url = UrlUtils.parseURL(address, parameters);
assertEquals(localAddress + ":10000", url.getAddress());
assertEquals("root", url.getUsername());
assertEquals("alibaba", url.getPassword());
assertEquals(10000, url.getPort());
assertEquals("dubbo", url.getProtocol());
assertEquals("dubbo.test.api", url.getPath());
assertEquals("bbb", url.getParameter("aaa"));
assertEquals("ddd", url.getParameter("ccc"));
}
@Test
void testParseUrl2() {
String address = "192.168.0.1";
String backupAddress1 = "192.168.0.2";
String backupAddress2 = "192.168.0.3";
Map<String, String> parameters = new HashMap<String, String>();
parameters.put("username", "root");
parameters.put("password", "alibaba");
parameters.put("port", "10000");
parameters.put("protocol", "dubbo");
URL url = UrlUtils.parseURL(address + "," + backupAddress1 + "," + backupAddress2, parameters);
assertEquals("192.168.0.1:10000", url.getAddress());
assertEquals("root", url.getUsername());
assertEquals("alibaba", url.getPassword());
assertEquals(10000, url.getPort());
assertEquals("dubbo", url.getProtocol());
assertEquals("192.168.0.2" + "," + "192.168.0.3", url.getParameter("backup"));
}
@Test
void testParseUrls() {
String addresses = "192.168.0.1|192.168.0.2|192.168.0.3";
Map<String, String> parameters = new HashMap<String, String>();
parameters.put("username", "root");
parameters.put("password", "alibaba");
parameters.put("port", "10000");
parameters.put("protocol", "dubbo");
List<URL> urls = UrlUtils.parseURLs(addresses, parameters);
assertEquals("192.168.0.1" + ":10000", urls.get(0).getAddress());
assertEquals("192.168.0.2" + ":10000", urls.get(1).getAddress());
}
@Test
void testParseUrlsAddressNull() {
String exceptionMessage = "Address is not allowed to be empty, please re-enter.";
try {
UrlUtils.parseURLs(null, null);
} catch (IllegalArgumentException illegalArgumentException) {
assertEquals(exceptionMessage, illegalArgumentException.getMessage());
}
}
@Test
void testConvertRegister() {
String key = "perf/dubbo.test.api.HelloService:1.0.0";
Map<String, Map<String, String>> register = new HashMap<String, Map<String, String>>();
register.put(key, null);
Map<String, Map<String, String>> newRegister = UrlUtils.convertRegister(register);
assertEquals(register, newRegister);
}
@Test
void testConvertRegister2() {
String key = "dubbo.test.api.HelloService";
Map<String, Map<String, String>> register = new HashMap<String, Map<String, String>>();
Map<String, String> service = new HashMap<String, String>();
service.put("dubbo://127.0.0.1:20880/com.xxx.XxxService", "version=1.0.0&group=test&dubbo.version=2.0.0");
register.put(key, service);
Map<String, Map<String, String>> newRegister = UrlUtils.convertRegister(register);
Map<String, String> newService = new HashMap<String, String>();
newService.put("dubbo://127.0.0.1:20880/com.xxx.XxxService", "dubbo.version=2.0.0&group=test&version=1.0.0");
assertEquals(newService, newRegister.get("test/dubbo.test.api.HelloService:1.0.0"));
}
@Test
void testSubscribe() {
String key = "perf/dubbo.test.api.HelloService:1.0.0";
Map<String, String> subscribe = new HashMap<String, String>();
subscribe.put(key, null);
Map<String, String> newSubscribe = UrlUtils.convertSubscribe(subscribe);
assertEquals(subscribe, newSubscribe);
}
@Test
void testSubscribe2() {
String key = "dubbo.test.api.HelloService";
Map<String, String> subscribe = new HashMap<String, String>();
subscribe.put(key, "version=1.0.0&group=test&dubbo.version=2.0.0");
Map<String, String> newSubscribe = UrlUtils.convertSubscribe(subscribe);
assertEquals(
"dubbo.version=2.0.0&group=test&version=1.0.0",
newSubscribe.get("test/dubbo.test.api.HelloService:1.0.0"));
}
@Test
void testRevertRegister() {
String key = "perf/dubbo.test.api.HelloService:1.0.0";
Map<String, Map<String, String>> register = new HashMap<String, Map<String, String>>();
Map<String, String> service = new HashMap<String, String>();
service.put("dubbo://127.0.0.1:20880/com.xxx.XxxService", null);
register.put(key, service);
Map<String, Map<String, String>> newRegister = UrlUtils.revertRegister(register);
Map<String, Map<String, String>> expectedRegister = new HashMap<String, Map<String, String>>();
service.put("dubbo://127.0.0.1:20880/com.xxx.XxxService", "group=perf&version=1.0.0");
expectedRegister.put("dubbo.test.api.HelloService", service);
assertEquals(expectedRegister, newRegister);
}
@Test
void testRevertRegister2() {
String key = "dubbo.test.api.HelloService";
Map<String, Map<String, String>> register = new HashMap<String, Map<String, String>>();
Map<String, String> service = new HashMap<String, String>();
service.put("dubbo://127.0.0.1:20880/com.xxx.XxxService", null);
register.put(key, service);
Map<String, Map<String, String>> newRegister = UrlUtils.revertRegister(register);
Map<String, Map<String, String>> expectedRegister = new HashMap<String, Map<String, String>>();
service.put("dubbo://127.0.0.1:20880/com.xxx.XxxService", null);
expectedRegister.put("dubbo.test.api.HelloService", service);
assertEquals(expectedRegister, newRegister);
}
@Test
void testRevertSubscribe() {
String key = "perf/dubbo.test.api.HelloService:1.0.0";
Map<String, String> subscribe = new HashMap<String, String>();
subscribe.put(key, null);
Map<String, String> newSubscribe = UrlUtils.revertSubscribe(subscribe);
Map<String, String> expectSubscribe = new HashMap<String, String>();
expectSubscribe.put("dubbo.test.api.HelloService", "group=perf&version=1.0.0");
assertEquals(expectSubscribe, newSubscribe);
}
@Test
void testRevertSubscribe2() {
String key = "dubbo.test.api.HelloService";
Map<String, String> subscribe = new HashMap<String, String>();
subscribe.put(key, null);
Map<String, String> newSubscribe = UrlUtils.revertSubscribe(subscribe);
assertEquals(subscribe, newSubscribe);
}
@Test
void testRevertNotify() {
String key = "dubbo.test.api.HelloService";
Map<String, Map<String, String>> notify = new HashMap<String, Map<String, String>>();
Map<String, String> service = new HashMap<String, String>();
service.put("dubbo://127.0.0.1:20880/com.xxx.XxxService", "group=perf&version=1.0.0");
notify.put(key, service);
Map<String, Map<String, String>> newRegister = UrlUtils.revertNotify(notify);
Map<String, Map<String, String>> expectedRegister = new HashMap<String, Map<String, String>>();
service.put("dubbo://127.0.0.1:20880/com.xxx.XxxService", "group=perf&version=1.0.0");
expectedRegister.put("perf/dubbo.test.api.HelloService:1.0.0", service);
assertEquals(expectedRegister, newRegister);
}
@Test
void testRevertNotify2() {
String key = "perf/dubbo.test.api.HelloService:1.0.0";
Map<String, Map<String, String>> notify = new HashMap<String, Map<String, String>>();
Map<String, String> service = new HashMap<String, String>();
service.put("dubbo://127.0.0.1:20880/com.xxx.XxxService", "group=perf&version=1.0.0");
notify.put(key, service);
Map<String, Map<String, String>> newRegister = UrlUtils.revertNotify(notify);
Map<String, Map<String, String>> expectedRegister = new HashMap<String, Map<String, String>>();
service.put("dubbo://127.0.0.1:20880/com.xxx.XxxService", "group=perf&version=1.0.0");
expectedRegister.put("perf/dubbo.test.api.HelloService:1.0.0", service);
assertEquals(expectedRegister, newRegister);
}
// backward compatibility for version 2.0.0
@Test
void testRevertForbid() {
String service = "dubbo.test.api.HelloService";
List<String> forbid = new ArrayList<String>();
forbid.add(service);
Set<URL> subscribed = new HashSet<URL>();
subscribed.add(URL.valueOf("dubbo://127.0.0.1:20880/" + service + "?group=perf&version=1.0.0"));
List<String> newForbid = UrlUtils.revertForbid(forbid, subscribed);
List<String> expectForbid = new ArrayList<String>();
expectForbid.add("perf/" + service + ":1.0.0");
assertEquals(expectForbid, newForbid);
}
@Test
void testRevertForbid2() {
List<String> newForbid = UrlUtils.revertForbid(null, null);
assertNull(newForbid);
}
@Test
void testRevertForbid3() {
String service1 = "dubbo.test.api.HelloService:1.0.0";
String service2 = "dubbo.test.api.HelloService:2.0.0";
List<String> forbid = new ArrayList<String>();
forbid.add(service1);
forbid.add(service2);
List<String> newForbid = UrlUtils.revertForbid(forbid, null);
assertEquals(forbid, newForbid);
}
@Test
void testIsMatch() {
URL consumerUrl = URL.valueOf("dubbo://127.0.0.1:20880/com.xxx.XxxService?version=1.0.0&group=test");
URL providerUrl = URL.valueOf("http://127.0.0.1:8080/com.xxx.XxxService?version=1.0.0&group=test");
assertTrue(UrlUtils.isMatch(consumerUrl, providerUrl));
}
@Test
void testIsMatch2() {
URL consumerUrl = URL.valueOf("dubbo://127.0.0.1:20880/com.xxx.XxxService?version=2.0.0&group=test");
URL providerUrl = URL.valueOf("http://127.0.0.1:8080/com.xxx.XxxService?version=1.0.0&group=test");
assertFalse(UrlUtils.isMatch(consumerUrl, providerUrl));
}
@Test
void testIsMatch3() {
URL consumerUrl = URL.valueOf("dubbo://127.0.0.1:20880/com.xxx.XxxService?version=1.0.0&group=aa");
URL providerUrl = URL.valueOf("http://127.0.0.1:8080/com.xxx.XxxService?version=1.0.0&group=test");
assertFalse(UrlUtils.isMatch(consumerUrl, providerUrl));
}
@Test
void testIsMatch4() {
URL consumerUrl = URL.valueOf("dubbo://127.0.0.1:20880/com.xxx.XxxService?version=1.0.0&group=*");
URL providerUrl = URL.valueOf("http://127.0.0.1:8080/com.xxx.XxxService?version=1.0.0&group=test");
assertTrue(UrlUtils.isMatch(consumerUrl, providerUrl));
}
@Test
void testIsMatch5() {
URL consumerUrl = URL.valueOf("dubbo://127.0.0.1:20880/com.xxx.XxxService?version=*&group=test");
URL providerUrl = URL.valueOf("http://127.0.0.1:8080/com.xxx.XxxService?version=1.0.0&group=test");
assertTrue(UrlUtils.isMatch(consumerUrl, providerUrl));
}
@Test
void testIsItemMatch() throws Exception {
assertTrue(UrlUtils.isItemMatch(null, null));
assertTrue(!UrlUtils.isItemMatch("1", null));
assertTrue(!UrlUtils.isItemMatch(null, "1"));
assertTrue(UrlUtils.isItemMatch("1", "1"));
assertTrue(UrlUtils.isItemMatch("*", null));
assertTrue(UrlUtils.isItemMatch("*", "*"));
assertTrue(UrlUtils.isItemMatch("*", "1234"));
assertTrue(!UrlUtils.isItemMatch(null, "*"));
}
@Test
void testIsServiceKeyMatch() throws Exception {
URL url = URL.valueOf("test://127.0.0.1");
URL pattern = url.addParameter(GROUP_KEY, "test")
.addParameter(INTERFACE_KEY, "test")
.addParameter(VERSION_KEY, "test");
URL value = pattern;
assertTrue(UrlUtils.isServiceKeyMatch(pattern, value));
pattern = pattern.addParameter(GROUP_KEY, "*");
assertTrue(UrlUtils.isServiceKeyMatch(pattern, value));
pattern = pattern.addParameter(VERSION_KEY, "*");
assertTrue(UrlUtils.isServiceKeyMatch(pattern, value));
}
@Test
void testGetEmptyUrl() throws Exception {
URL url = UrlUtils.getEmptyUrl("dubbo/a.b.c.Foo:1.0.0", "test");
assertThat(url.toFullString(), equalTo("empty://0.0.0.0/a.b.c.Foo?category=test&group=dubbo&version=1.0.0"));
}
@Test
void testIsMatchGlobPattern() throws Exception {
assertTrue(UrlUtils.isMatchGlobPattern("*", "value"));
assertTrue(UrlUtils.isMatchGlobPattern("", null));
assertFalse(UrlUtils.isMatchGlobPattern("", "value"));
assertTrue(UrlUtils.isMatchGlobPattern("value", "value"));
assertTrue(UrlUtils.isMatchGlobPattern("v*", "value"));
assertTrue(UrlUtils.isMatchGlobPattern("*e", "value"));
assertTrue(UrlUtils.isMatchGlobPattern("v*e", "value"));
assertTrue(UrlUtils.isMatchGlobPattern("$key", "value", URL.valueOf("dubbo://localhost:8080/Foo?key=v*e")));
}
@Test
void testIsMatchUrlWithDefaultPrefix() {
URL url = URL.valueOf("dubbo://127.0.0.1:20880/com.xxx.XxxService?default.version=1.0.0&default.group=test");
assertEquals("1.0.0", url.getVersion());
assertEquals("1.0.0", url.getParameter("default.version"));
URL consumerUrl = URL.valueOf("consumer://127.0.0.1/com.xxx.XxxService?version=1.0.0&group=test");
assertTrue(UrlUtils.isMatch(consumerUrl, url));
URL consumerUrl1 =
URL.valueOf("consumer://127.0.0.1/com.xxx.XxxService?default.version=1.0.0&default.group=test");
assertTrue(UrlUtils.isMatch(consumerUrl1, url));
}
@Test
public void testIsConsumer() {
String address1 = "remote://root:alibaba@127.0.0.1:9090";
URL url1 = UrlUtils.parseURL(address1, null);
String address2 = "consumer://root:alibaba@127.0.0.1:9090";
URL url2 = UrlUtils.parseURL(address2, null);
String address3 = "consumer://root:alibaba@127.0.0.1";
URL url3 = UrlUtils.parseURL(address3, null);
assertFalse(UrlUtils.isConsumer(url1));
assertTrue(UrlUtils.isConsumer(url2));
assertTrue(UrlUtils.isConsumer(url3));
}
@Test
public void testPrivateConstructor() throws Exception {
Constructor<UrlUtils> constructor = UrlUtils.class.getDeclaredConstructor();
assertTrue(Modifier.isPrivate(constructor.getModifiers()));
constructor.setAccessible(true);
assertThrows(InvocationTargetException.class, () -> {
constructor.newInstance();
});
}
@Test
public void testClassifyUrls() {
String address1 = "remote://root:alibaba@127.0.0.1:9090";
URL url1 = UrlUtils.parseURL(address1, null);
String address2 = "consumer://root:alibaba@127.0.0.1:9090";
URL url2 = UrlUtils.parseURL(address2, null);
String address3 = "remote://root:alibaba@127.0.0.1";
URL url3 = UrlUtils.parseURL(address3, null);
String address4 = "consumer://root:alibaba@127.0.0.1";
URL url4 = UrlUtils.parseURL(address4, null);
List<URL> urls = new ArrayList<>();
urls.add(url1);
urls.add(url2);
urls.add(url3);
urls.add(url4);
List<URL> consumerUrls = UrlUtils.classifyUrls(urls, UrlUtils::isConsumer);
assertEquals(2, consumerUrls.size());
assertTrue(consumerUrls.contains(url2));
assertTrue(consumerUrls.contains(url4));
List<URL> nonConsumerUrls = UrlUtils.classifyUrls(urls, url -> !UrlUtils.isConsumer(url));
assertEquals(2, nonConsumerUrls.size());
assertTrue(nonConsumerUrls.contains(url1));
assertTrue(nonConsumerUrls.contains(url3));
}
@Test
public void testHasServiceDiscoveryRegistryProtocol() {
String address1 = "http://root:alibaba@127.0.0.1:9090/dubbo.test.api";
URL url1 = UrlUtils.parseURL(address1, null);
String address2 = "service-discovery-registry://root:alibaba@127.0.0.1:9090/dubbo.test.api";
URL url2 = UrlUtils.parseURL(address2, null);
assertFalse(UrlUtils.hasServiceDiscoveryRegistryProtocol(url1));
assertTrue(UrlUtils.hasServiceDiscoveryRegistryProtocol(url2));
}
private static final String SERVICE_REGISTRY_TYPE = "service";
private static final String REGISTRY_TYPE_KEY = "registry-type";
@Test
public void testHasServiceDiscoveryRegistryTypeKey() {
Map<String, String> parameters1 = new HashMap<>();
parameters1.put(REGISTRY_TYPE_KEY, "value2");
assertFalse(UrlUtils.hasServiceDiscoveryRegistryTypeKey(parameters1));
Map<String, String> parameters2 = new HashMap<>();
parameters2.put(REGISTRY_TYPE_KEY, SERVICE_REGISTRY_TYPE);
assertTrue(UrlUtils.hasServiceDiscoveryRegistryTypeKey(parameters2));
}
@Test
public void testIsConfigurator() {
String address1 = "http://example.com";
URL url1 = UrlUtils.parseURL(address1, null);
String address2 = "override://example.com";
URL url2 = UrlUtils.parseURL(address2, null);
String address3 = "http://example.com?category=configurators";
URL url3 = UrlUtils.parseURL(address3, null);
assertFalse(UrlUtils.isConfigurator(url1));
assertTrue(UrlUtils.isConfigurator(url2));
assertTrue(UrlUtils.isConfigurator(url3));
}
@Test
public void testIsRoute() {
String address1 = "http://example.com";
URL url1 = UrlUtils.parseURL(address1, null);
String address2 = "route://example.com";
URL url2 = UrlUtils.parseURL(address2, null);
String address3 = "http://example.com?category=routers";
URL url3 = UrlUtils.parseURL(address3, null);
assertFalse(UrlUtils.isRoute(url1));
assertTrue(UrlUtils.isRoute(url2));
assertTrue(UrlUtils.isRoute(url3));
}
@Test
public void testIsProvider() {
String address1 = "http://example.com";
URL url1 = UrlUtils.parseURL(address1, null);
String address2 = "override://example.com";
URL url2 = UrlUtils.parseURL(address2, null);
String address3 = "route://example.com";
URL url3 = UrlUtils.parseURL(address3, null);
String address4 = "http://example.com?category=providers";
URL url4 = UrlUtils.parseURL(address4, null);
String address5 = "http://example.com?category=something-else";
URL url5 = UrlUtils.parseURL(address5, null);
assertTrue(UrlUtils.isProvider(url1));
assertFalse(UrlUtils.isProvider(url2));
assertFalse(UrlUtils.isProvider(url3));
assertTrue(UrlUtils.isProvider(url4));
assertFalse(UrlUtils.isProvider(url5));
}
@Test
public void testIsRegistry() {
String address1 = "http://example.com";
URL url1 = UrlUtils.parseURL(address1, null);
String address2 = "registry://example.com";
URL url2 = UrlUtils.parseURL(address2, null);
String address3 = "sr://example.com";
URL url3 = UrlUtils.parseURL(address3, null);
String address4 = "custom-registry-protocol://example.com";
URL url4 = UrlUtils.parseURL(address4, null);
assertFalse(UrlUtils.isRegistry(url1));
assertTrue(UrlUtils.isRegistry(url2));
assertFalse(UrlUtils.isRegistry(url3));
assertTrue(UrlUtils.isRegistry(url4));
}
@Test
public void testIsServiceDiscoveryURL() {
String address1 = "http://example.com";
URL url1 = UrlUtils.parseURL(address1, null);
String address2 = "service-discovery-registry://example.com";
URL url2 = UrlUtils.parseURL(address2, null);
String address3 = "SERVICE-DISCOVERY-REGISTRY://example.com";
URL url3 = UrlUtils.parseURL(address3, null);
String address4 = "http://example.com?registry-type=service";
URL url4 = UrlUtils.parseURL(address4, null);
url4.addParameter(REGISTRY_TYPE_KEY, SERVICE_REGISTRY_TYPE);
assertFalse(UrlUtils.isServiceDiscoveryURL(url1));
assertTrue(UrlUtils.isServiceDiscoveryURL(url2));
assertTrue(UrlUtils.isServiceDiscoveryURL(url3));
assertTrue(UrlUtils.isServiceDiscoveryURL(url4));
}
}
| UrlUtilsTest |
java | elastic__elasticsearch | modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java | {
"start": 1635,
"end": 7216
} | class ____ extends ESTestCase {
private static final PatternBank LEGACY_TEST_PATTERNS = new PatternBank(Map.of("PATTERN2", "foo2", "PATTERN1", "foo1"));
private static final PatternBank ECS_TEST_PATTERNS = new PatternBank(Map.of("ECS_PATTERN2", "foo2", "ECS_PATTERN1", "foo1"));
public void testRequest() throws Exception {
GrokProcessorGetAction.Request request = new GrokProcessorGetAction.Request(false, GrokProcessor.DEFAULT_ECS_COMPATIBILITY_MODE);
BytesStreamOutput out = new BytesStreamOutput();
request.writeTo(out);
StreamInput streamInput = out.bytes().streamInput();
GrokProcessorGetAction.Request otherRequest = new GrokProcessorGetAction.Request(streamInput);
assertThat(otherRequest.validate(), nullValue());
}
public void testResponseSerialization() throws Exception {
GrokProcessorGetAction.Response response = new GrokProcessorGetAction.Response(LEGACY_TEST_PATTERNS.bank());
BytesStreamOutput out = new BytesStreamOutput();
response.writeTo(out);
StreamInput streamInput = out.bytes().streamInput();
GrokProcessorGetAction.Response otherResponse = new GrokProcessorGetAction.Response(streamInput);
assertThat(response.getGrokPatterns(), equalTo(LEGACY_TEST_PATTERNS.bank()));
assertThat(response.getGrokPatterns(), equalTo(otherResponse.getGrokPatterns()));
}
public void testResponseSorting() {
List<String> sortedKeys = new ArrayList<>(LEGACY_TEST_PATTERNS.bank().keySet());
Collections.sort(sortedKeys);
TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor();
GrokProcessorGetAction.TransportAction transportAction = new GrokProcessorGetAction.TransportAction(
transportService,
mock(ActionFilters.class),
LEGACY_TEST_PATTERNS,
ECS_TEST_PATTERNS
);
GrokProcessorGetAction.Response[] receivedResponse = new GrokProcessorGetAction.Response[1];
transportAction.doExecute(
null,
new GrokProcessorGetAction.Request(true, GrokProcessor.DEFAULT_ECS_COMPATIBILITY_MODE),
new ActionListener<>() {
@Override
public void onResponse(GrokProcessorGetAction.Response response) {
receivedResponse[0] = response;
}
@Override
public void onFailure(Exception e) {
fail();
}
}
);
assertThat(receivedResponse[0], notNullValue());
assertThat(receivedResponse[0].getGrokPatterns().keySet().toArray(), equalTo(sortedKeys.toArray()));
GrokProcessorGetAction.Response firstResponse = receivedResponse[0];
transportAction.doExecute(
null,
new GrokProcessorGetAction.Request(true, GrokProcessor.DEFAULT_ECS_COMPATIBILITY_MODE),
new ActionListener<>() {
@Override
public void onResponse(GrokProcessorGetAction.Response response) {
receivedResponse[0] = response;
}
@Override
public void onFailure(Exception e) {
fail();
}
}
);
assertThat(receivedResponse[0], notNullValue());
assertThat(receivedResponse[0], not(sameInstance(firstResponse)));
assertThat(receivedResponse[0].getGrokPatterns(), sameInstance(firstResponse.getGrokPatterns()));
}
public void testEcsCompatibilityMode() {
List<String> sortedKeys = new ArrayList<>(ECS_TEST_PATTERNS.bank().keySet());
Collections.sort(sortedKeys);
TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor();
GrokProcessorGetAction.TransportAction transportAction = new GrokProcessorGetAction.TransportAction(
transportService,
mock(ActionFilters.class),
LEGACY_TEST_PATTERNS,
ECS_TEST_PATTERNS
);
GrokProcessorGetAction.Response[] receivedResponse = new GrokProcessorGetAction.Response[1];
transportAction.doExecute(null, new GrokProcessorGetAction.Request(true, ECS_COMPATIBILITY_V1), new ActionListener<>() {
@Override
public void onResponse(GrokProcessorGetAction.Response response) {
receivedResponse[0] = response;
}
@Override
public void onFailure(Exception e) {
fail();
}
});
assertThat(receivedResponse[0], notNullValue());
assertThat(receivedResponse[0].getGrokPatterns().keySet().toArray(), equalTo(sortedKeys.toArray()));
}
@SuppressWarnings("unchecked")
public void testResponseToXContent() throws Exception {
GrokProcessorGetAction.Response response = new GrokProcessorGetAction.Response(LEGACY_TEST_PATTERNS.bank());
try (XContentBuilder builder = JsonXContent.contentBuilder()) {
response.toXContent(builder, ToXContent.EMPTY_PARAMS);
Map<String, Object> converted = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
Map<String, String> patterns = (Map<String, String>) converted.get("patterns");
assertThat(patterns.size(), equalTo(2));
assertThat(patterns.get("PATTERN1"), equalTo("foo1"));
assertThat(patterns.get("PATTERN2"), equalTo("foo2"));
}
}
}
| GrokProcessorGetActionTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/LiteEnumValueOfTest.java | {
"start": 1220,
"end": 1394
} | interface ____ {}\
""")
.addSourceLines(
"Parcelable.java",
"""
package android.os;
public | Parcel |
java | apache__dubbo | dubbo-plugin/dubbo-qos/src/main/java/org/apache/dubbo/qos/textui/TTree.java | {
"start": 7101,
"end": 7151
} | interface ____ recursive visit
*/
private | for |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/condition/VersionRequestCondition.java | {
"start": 1442,
"end": 5556
} | class ____ extends AbstractRequestCondition<VersionRequestCondition> {
private final @Nullable String versionValue;
private final @Nullable Comparable<?> version;
private final boolean baselineVersion;
private final Set<String> content;
/**
* Constructor with the version, if set on the {@code @RequestMapping}, and
* the {@code ApiVersionStrategy}, if API versioning is enabled.
*/
public VersionRequestCondition(@Nullable String version, @Nullable ApiVersionStrategy strategy) {
if (StringUtils.hasText(version)) {
Assert.isTrue(strategy != null, "ApiVersionStrategy is required for mapping by version");
this.baselineVersion = version.endsWith("+");
this.versionValue = updateVersion(version, this.baselineVersion);
this.version = strategy.parseVersion(this.versionValue);
this.content = Set.of(version);
}
else {
this.versionValue = null;
this.version = null;
this.baselineVersion = false;
this.content = Collections.emptySet();
}
}
private static String updateVersion(String version, boolean baselineVersion) {
return (baselineVersion ? version.substring(0, version.length() - 1) : version);
}
@Override
protected Collection<String> getContent() {
return this.content;
}
@Override
protected String getToStringInfix() {
return " && ";
}
/**
* Return the raw version value.
*/
public @Nullable String getVersion() {
return this.versionValue;
}
@Override
public VersionRequestCondition combine(VersionRequestCondition other) {
return (other.version != null ? other : this);
}
@Override
public @Nullable VersionRequestCondition getMatchingCondition(HttpServletRequest request) {
Comparable<?> requestVersion = (Comparable<?>) request.getAttribute(HandlerMapping.API_VERSION_ATTRIBUTE);
if (this.version == null || requestVersion == null) {
return this;
}
// Always use a baseline match here in order to select the highest version (baseline or fixed)
// The fixed version match is enforced at the end in handleMatch()
int result = compareVersions(this.version, requestVersion);
return (result <= 0 ? this : null);
}
@SuppressWarnings("unchecked")
private <V extends Comparable<V>> int compareVersions(Object v1, Object v2) {
return ((V) v1).compareTo((V) v2);
}
@Override
public int compareTo(VersionRequestCondition other, HttpServletRequest request) {
Object otherVersion = other.version;
if (this.version == null && otherVersion == null) {
return 0;
}
else if (this.version != null && otherVersion != null) {
// make higher version bubble up
return (-1 * compareVersions(this.version, otherVersion));
}
else {
// Prefer mappings with a version unless the request is without a version
int result = this.version != null ? -1 : 1;
Comparable<?> version = (Comparable<?>) request.getAttribute(HandlerMapping.API_VERSION_ATTRIBUTE);
return (version == null ? -1 * result : result);
}
}
/**
* Perform a final check on the matched request mapping version.
* <p>In order to ensure baseline versions are properly capped by higher
* fixed versions, initially we match all versions as baseline versions in
* {@link #getMatchingCondition(HttpServletRequest)}. Once the highest of
* potentially multiple matches is selected, we enforce the strict match
* for fixed versions.
* <p>For example, given controller methods for "1.2+" and "1.5", and
* a request for "1.6", both are matched, allowing "1.5" to be selected, but
* that is then rejected as not acceptable since it is not an exact match.
* @param request the current request
* @throws NotAcceptableApiVersionException if the matched condition has a
* fixed version that is not equal to the request version
*/
public void handleMatch(HttpServletRequest request) {
if (this.version != null && !this.baselineVersion) {
Comparable<?> version = (Comparable<?>) request.getAttribute(HandlerMapping.API_VERSION_ATTRIBUTE);
if (version != null && !this.version.equals(version)) {
throw new NotAcceptableApiVersionException(version.toString());
}
}
}
}
| VersionRequestCondition |
java | apache__camel | components/camel-kubernetes/src/main/java/org/apache/camel/component/openshift/deploymentconfigs/OpenshiftDeploymentConfigsProducer.java | {
"start": 1899,
"end": 10595
} | class ____ extends DefaultProducer {
private static final Logger LOG = LoggerFactory.getLogger(OpenshiftDeploymentConfigsProducer.class);
public OpenshiftDeploymentConfigsProducer(AbstractKubernetesEndpoint endpoint) {
super(endpoint);
}
@Override
public AbstractKubernetesEndpoint getEndpoint() {
return (AbstractKubernetesEndpoint) super.getEndpoint();
}
@Override
public void process(Exchange exchange) throws Exception {
String operation = KubernetesHelper.extractOperation(getEndpoint(), exchange);
switch (operation) {
case KubernetesOperations.LIST_DEPLOYMENT_CONFIGS:
doList(exchange);
break;
case KubernetesOperations.LIST_DEPLOYMENT_CONFIGS_BY_LABELS_OPERATION:
doListDeploymentConfigsByLabels(exchange);
break;
case KubernetesOperations.GET_DEPLOYMENT_CONFIG:
doGetDeploymentConfig(exchange);
break;
case KubernetesOperations.DELETE_DEPLOYMENT_CONFIG:
doDeleteDeploymentConfig(exchange);
break;
case KubernetesOperations.CREATE_DEPLOYMENT_CONFIG:
doCreateDeployment(exchange);
break;
case KubernetesOperations.UPDATE_DEPLOYMENT_CONFIG:
doUpdateDeployment(exchange);
break;
case KubernetesOperations.SCALE_DEPLOYMENT_CONFIG:
doScaleDeploymentConfig(exchange);
break;
default:
throw new IllegalArgumentException("Unsupported operation " + operation);
}
}
protected void doList(Exchange exchange) {
DeploymentConfigList deploymentConfigList
= getEndpoint().getKubernetesClient().adapt(OpenShiftClient.class).deploymentConfigs().list();
prepareOutboundMessage(exchange, deploymentConfigList.getItems());
}
protected void doListDeploymentConfigsByLabels(Exchange exchange) {
Map<String, String> labels
= exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_DEPLOYMENTS_LABELS, Map.class);
DeploymentConfigList deploymentConfigList
= getEndpoint().getKubernetesClient().adapt(OpenShiftClient.class).deploymentConfigs()
.withLabels(labels).list();
prepareOutboundMessage(exchange, deploymentConfigList.getItems());
}
protected void doGetDeploymentConfig(Exchange exchange) {
String deploymentConfigName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_DEPLOYMENT_NAME, String.class);
if (ObjectHelper.isEmpty(deploymentConfigName)) {
LOG.error("Get a specific Deployment Config require specify a Deployment name");
throw new IllegalArgumentException("Get a specific Deployment Config require specify a Deployment Config name");
}
DeploymentConfig deploymentConfig = getEndpoint().getKubernetesClient().adapt(OpenShiftClient.class).deploymentConfigs()
.withName(deploymentConfigName).get();
prepareOutboundMessage(exchange, deploymentConfig);
}
protected void doDeleteDeploymentConfig(Exchange exchange) {
String deploymentName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_DEPLOYMENT_NAME, String.class);
String namespaceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, String.class);
if (ObjectHelper.isEmpty(deploymentName)) {
LOG.error("Delete a specific deployment config require specify a deployment name");
throw new IllegalArgumentException("Delete a specific deployment require specify a deployment config name");
}
if (ObjectHelper.isEmpty(namespaceName)) {
LOG.error("Delete a specific deployment config require specify a namespace name");
throw new IllegalArgumentException("Delete a specific deployment config require specify a namespace name");
}
List<StatusDetails> statusDetails = getEndpoint().getKubernetesClient().adapt(OpenShiftClient.class).deploymentConfigs()
.inNamespace(namespaceName)
.withName(deploymentName).delete();
boolean deploymentConfigDeleted = ObjectHelper.isNotEmpty(statusDetails);
prepareOutboundMessage(exchange, deploymentConfigDeleted);
}
protected void doUpdateDeployment(Exchange exchange) {
doCreateOrUpdateDeployment(exchange, "Update", Resource::update);
}
protected void doCreateDeployment(Exchange exchange) {
doCreateOrUpdateDeployment(exchange, "Create", Resource::create);
}
private void doCreateOrUpdateDeployment(
Exchange exchange, String operationName, Function<Resource<DeploymentConfig>, DeploymentConfig> operation) {
String deploymentName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_DEPLOYMENT_NAME, String.class);
String namespaceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, String.class);
DeploymentConfigSpec dcSpec
= exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_DEPLOYMENT_CONFIG_SPEC, DeploymentConfigSpec.class);
if (ObjectHelper.isEmpty(deploymentName)) {
LOG.error("{} a specific Deployment Config require specify a Deployment name", operationName);
throw new IllegalArgumentException(
String.format("%s a specific Deployment Config require specify a pod name", operationName));
}
if (ObjectHelper.isEmpty(namespaceName)) {
LOG.error("{} a specific Deployment Config require specify a namespace name", operationName);
throw new IllegalArgumentException(
String.format("%s a specific Deployment Config require specify a namespace name", operationName));
}
if (ObjectHelper.isEmpty(dcSpec)) {
LOG.error("{} a specific Deployment Config require specify a Deployment Config spec bean", operationName);
throw new IllegalArgumentException(
String.format("%s a specific Deployment Config require specify a Deployment Config spec bean",
operationName));
}
Map<String, String> labels = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_DEPLOYMENTS_LABELS, Map.class);
DeploymentConfig deploymentCreating = new DeploymentConfigBuilder().withNewMetadata().withName(deploymentName)
.withLabels(labels).endMetadata().withSpec(dcSpec).build();
DeploymentConfig deploymentConfig
= operation.apply(getEndpoint().getKubernetesClient().adapt(OpenShiftClient.class).deploymentConfigs()
.inNamespace(namespaceName)
.resource(deploymentCreating));
prepareOutboundMessage(exchange, deploymentConfig);
}
protected void doScaleDeploymentConfig(Exchange exchange) {
String deploymentName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_DEPLOYMENT_NAME, String.class);
String namespaceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, String.class);
Integer replicasNumber = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_DEPLOYMENT_REPLICAS, Integer.class);
if (ObjectHelper.isEmpty(deploymentName)) {
LOG.error("Scale a specific deployment config require specify a deployment config name");
throw new IllegalArgumentException("Scale a specific deployment config require specify a deployment config name");
}
if (ObjectHelper.isEmpty(namespaceName)) {
LOG.error("Scale a specific deployment config require specify a namespace name");
throw new IllegalArgumentException("Scale a specific deployment config require specify a namespace name");
}
if (ObjectHelper.isEmpty(replicasNumber)) {
LOG.error("Scale a specific deployment config require specify a replicas number");
throw new IllegalArgumentException("Scale a specific deployment config require specify a replicas number");
}
DeploymentConfig deploymentConfigScaled
= getEndpoint().getKubernetesClient().adapt(OpenShiftClient.class).deploymentConfigs()
.inNamespace(namespaceName)
.withName(deploymentName).scale(replicasNumber);
prepareOutboundMessage(exchange, deploymentConfigScaled.getStatus().getReplicas());
}
}
| OpenshiftDeploymentConfigsProducer |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/lucene/spatial/TriangleTreeVisitor.java | {
"start": 848,
"end": 2475
} | interface ____ {
/** visit a node point. */
void visitPoint(int x, int y);
/** visit a node line. */
void visitLine(int aX, int aY, int bX, int bY, byte metadata);
/** visit a node triangle. */
void visitTriangle(int aX, int aY, int bX, int bY, int cX, int cY, byte metadata);
/** Should the visitor keep visiting the tree. Called after visiting a node or skipping
* a tree branch, if the return value is {@code false}, no more nodes will be visited. */
boolean push();
/** Should the visitor visit nodes that have bounds greater or equal
* than the {@code minX} provided. */
boolean pushX(int minX);
/** Should the visitor visit nodes that have bounds greater or equal
* than the {@code minY} provided. */
boolean pushY(int minY);
/** Should the visitor visit nodes that have bounds lower or equal than the
* {@code maxX} and {@code minX} provided. */
boolean push(int maxX, int maxY);
/** Should the visitor visit the tree given the bounding box of the tree. Called before
* visiting the tree. */
boolean push(int minX, int minY, int maxX, int maxY);
static boolean abFromTriangle(byte metadata) {
return (metadata & AB_FROM_TRIANGLE) == AB_FROM_TRIANGLE;
}
static boolean bcFromTriangle(byte metadata) {
return (metadata & BC_FROM_TRIANGLE) == BC_FROM_TRIANGLE;
}
static boolean caFromTriangle(byte metadata) {
return (metadata & CA_FROM_TRIANGLE) == CA_FROM_TRIANGLE;
}
/** Visitor for triangle interval tree which decodes the coordinates */
abstract | TriangleTreeVisitor |
java | apache__camel | components/camel-micrometer/src/main/java/org/apache/camel/component/micrometer/eventnotifier/MicrometerExchangeEventNotifierNamingStrategyDefault.java | {
"start": 1055,
"end": 1679
} | class ____ implements MicrometerExchangeEventNotifierNamingStrategy {
private boolean endpointBaseURI = true;
public MicrometerExchangeEventNotifierNamingStrategyDefault() {
}
public MicrometerExchangeEventNotifierNamingStrategyDefault(boolean endpointBaseURI) {
this.endpointBaseURI = endpointBaseURI;
}
@Override
public String getName(Exchange exchange, Endpoint endpoint) {
return DEFAULT_CAMEL_EXCHANGE_EVENT_METER_NAME;
}
@Override
public boolean isBaseEndpointURI() {
return endpointBaseURI;
}
}
| MicrometerExchangeEventNotifierNamingStrategyDefault |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/schemaupdate/foreignkeys/definition/ForeignKeyDefinitionManyToOneTest.java | {
"start": 517,
"end": 862
} | class ____ extends AbstractForeignKeyDefinitionTest {
@Override
protected Class<?>[] getAnnotatedClasses() {
return new Class<?>[] { Box.class, Thing.class };
}
@Override
protected boolean validate(String fileContent) {
return fileContent.contains( "/* FK */" );
}
@Entity(name = "Box")
public static | ForeignKeyDefinitionManyToOneTest |
java | micronaut-projects__micronaut-core | http-server-netty/src/main/java/io/micronaut/http/server/netty/MicronautHttpData.java | {
"start": 2426,
"end": 2541
} | class ____ release chunks of that data for concurrent access by the user (see
* {@link #pollChunk()}).<br>
* This | can |
java | apache__kafka | storage/src/main/java/org/apache/kafka/storage/internals/log/ProducerStateEntry.java | {
"start": 1574,
"end": 7144
} | class ____ {
public static final int NUM_BATCHES_TO_RETAIN = 5;
private final long producerId;
private final Deque<BatchMetadata> batchMetadata = new ArrayDeque<>();
private short producerEpoch;
private int coordinatorEpoch;
private long lastTimestamp;
private OptionalLong currentTxnFirstOffset;
public static ProducerStateEntry empty(long producerId) {
return new ProducerStateEntry(producerId, RecordBatch.NO_PRODUCER_EPOCH, -1, RecordBatch.NO_TIMESTAMP, OptionalLong.empty(), Optional.empty());
}
public ProducerStateEntry(long producerId, short producerEpoch, int coordinatorEpoch, long lastTimestamp, OptionalLong currentTxnFirstOffset, Optional<BatchMetadata> firstBatchMetadata) {
this.producerId = producerId;
this.producerEpoch = producerEpoch;
this.coordinatorEpoch = coordinatorEpoch;
this.lastTimestamp = lastTimestamp;
this.currentTxnFirstOffset = currentTxnFirstOffset;
firstBatchMetadata.ifPresent(batchMetadata::add);
}
public int firstSeq() {
return isEmpty() ? RecordBatch.NO_SEQUENCE : batchMetadata.getFirst().firstSeq();
}
public int lastSeq() {
return isEmpty() ? RecordBatch.NO_SEQUENCE : batchMetadata.getLast().lastSeq();
}
public long firstDataOffset() {
return isEmpty() ? -1L : batchMetadata.getFirst().firstOffset();
}
public long lastDataOffset() {
return isEmpty() ? -1L : batchMetadata.getLast().lastOffset();
}
public int lastOffsetDelta() {
return isEmpty() ? 0 : batchMetadata.getLast().offsetDelta();
}
public boolean isEmpty() {
return batchMetadata.isEmpty();
}
/**
* Returns a new instance with the provided parameters (when present) and the values from the current instance
* otherwise.
*/
public ProducerStateEntry withProducerIdAndBatchMetadata(long producerId, Optional<BatchMetadata> batchMetadata) {
return new ProducerStateEntry(producerId, this.producerEpoch(), this.coordinatorEpoch, this.lastTimestamp,
this.currentTxnFirstOffset, batchMetadata);
}
public void addBatch(short producerEpoch, int lastSeq, long lastOffset, int offsetDelta, long timestamp) {
maybeUpdateProducerEpoch(producerEpoch);
addBatchMetadata(new BatchMetadata(lastSeq, lastOffset, offsetDelta, timestamp));
this.lastTimestamp = timestamp;
}
public boolean maybeUpdateProducerEpoch(short producerEpoch) {
if (this.producerEpoch != producerEpoch) {
batchMetadata.clear();
this.producerEpoch = producerEpoch;
return true;
} else {
return false;
}
}
private void addBatchMetadata(BatchMetadata batch) {
if (batchMetadata.size() == ProducerStateEntry.NUM_BATCHES_TO_RETAIN) batchMetadata.removeFirst();
batchMetadata.add(batch);
}
public void update(ProducerStateEntry nextEntry) {
update(nextEntry.producerEpoch, nextEntry.coordinatorEpoch, nextEntry.lastTimestamp, nextEntry.batchMetadata, nextEntry.currentTxnFirstOffset);
}
public void update(short producerEpoch, int coordinatorEpoch, long lastTimestamp) {
update(producerEpoch, coordinatorEpoch, lastTimestamp, new ArrayDeque<>(0), OptionalLong.empty());
}
private void update(short producerEpoch, int coordinatorEpoch, long lastTimestamp, Deque<BatchMetadata> batchMetadata,
OptionalLong currentTxnFirstOffset) {
maybeUpdateProducerEpoch(producerEpoch);
while (!batchMetadata.isEmpty())
addBatchMetadata(batchMetadata.removeFirst());
this.coordinatorEpoch = coordinatorEpoch;
this.currentTxnFirstOffset = currentTxnFirstOffset;
this.lastTimestamp = lastTimestamp;
}
public void setCurrentTxnFirstOffset(long firstOffset) {
this.currentTxnFirstOffset = OptionalLong.of(firstOffset);
}
public Optional<BatchMetadata> findDuplicateBatch(RecordBatch batch) {
if (batch.producerEpoch() != producerEpoch) return Optional.empty();
else return batchWithSequenceRange(batch.baseSequence(), batch.lastSequence());
}
// Return the batch metadata of the cached batch having the exact sequence range, if any.
Optional<BatchMetadata> batchWithSequenceRange(int firstSeq, int lastSeq) {
Stream<BatchMetadata> duplicate = batchMetadata.stream().filter(metadata -> firstSeq == metadata.firstSeq() && lastSeq == metadata.lastSeq());
return duplicate.findFirst();
}
public Collection<BatchMetadata> batchMetadata() {
return Collections.unmodifiableCollection(batchMetadata);
}
public short producerEpoch() {
return producerEpoch;
}
public long producerId() {
return producerId;
}
public int coordinatorEpoch() {
return coordinatorEpoch;
}
public long lastTimestamp() {
return lastTimestamp;
}
public OptionalLong currentTxnFirstOffset() {
return currentTxnFirstOffset;
}
@Override
public String toString() {
return "ProducerStateEntry(" +
"producerId=" + producerId +
", producerEpoch=" + producerEpoch +
", currentTxnFirstOffset=" + currentTxnFirstOffset +
", coordinatorEpoch=" + coordinatorEpoch +
", lastTimestamp=" + lastTimestamp +
", batchMetadata=" + batchMetadata +
')';
}
}
| ProducerStateEntry |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/params/aggregator/AggregatorIntegrationTests.java | {
"start": 10519,
"end": 11110
} | class ____ extends SimpleArgumentsAggregator {
@Override
public Address aggregateArguments(ArgumentsAccessor arguments, Class<?> targetType,
AnnotatedElementContext context, int parameterIndex) {
int startIndex = context.findAnnotation(StartIndex.class).map(StartIndex::value).orElse(0);
// @formatter:off
return new Address(
arguments.getString(startIndex + 0),
arguments.getString(startIndex + 1),
requireNonNull(arguments.getInteger(startIndex + 2))
);
// @formatter:on
}
}
/**
* Maps from String to length of String.
*/
static | AddressAggregator |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/oauth2/server/authorization/OidcClientRegistrationTests.java | {
"start": 31088,
"end": 32263
} | class ____ extends AuthorizationServerConfiguration {
// @formatter:off
@Bean
@Override
public SecurityFilterChain authorizationServerSecurityFilterChain(HttpSecurity http) throws Exception {
http
.oauth2AuthorizationServer((authorizationServer) ->
authorizationServer
.oidc((oidc) ->
oidc
.clientRegistrationEndpoint((clientRegistration) ->
clientRegistration
.clientRegistrationRequestConverter(authenticationConverter)
.clientRegistrationRequestConverters(authenticationConvertersConsumer)
.authenticationProvider(authenticationProvider)
.authenticationProviders(authenticationProvidersConsumer)
.clientRegistrationResponseHandler(authenticationSuccessHandler)
.errorResponseHandler(authenticationFailureHandler)
)
)
)
.authorizeHttpRequests((authorize) ->
authorize.anyRequest().authenticated()
);
return http.build();
}
// @formatter:on
}
@EnableWebSecurity
@Configuration(proxyBeanMethods = false)
static | CustomClientRegistrationConfiguration |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/customproviders/IllegalStateExceptionMapper.java | {
"start": 198,
"end": 451
} | class ____ implements ExceptionMapper<IllegalStateException> {
@Override
public Response toResponse(IllegalStateException exception) {
return Response.serverError().entity(exception.getMessage()).build();
}
}
| IllegalStateExceptionMapper |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/EvictingWindowOperatorTest.java | {
"start": 56119,
"end": 57096
} | class ____ implements Comparator<Object> {
@Override
public int compare(Object o1, Object o2) {
if (o1 instanceof Watermark || o2 instanceof Watermark) {
return 0;
} else {
StreamRecord<Tuple2<String, Integer>> sr0 =
(StreamRecord<Tuple2<String, Integer>>) o1;
StreamRecord<Tuple2<String, Integer>> sr1 =
(StreamRecord<Tuple2<String, Integer>>) o2;
if (sr0.getTimestamp() != sr1.getTimestamp()) {
return (int) (sr0.getTimestamp() - sr1.getTimestamp());
}
int comparison = sr0.getValue().f0.compareTo(sr1.getValue().f0);
if (comparison != 0) {
return comparison;
} else {
return sr0.getValue().f1 - sr1.getValue().f1;
}
}
}
}
private static | ResultSortComparator |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerMXBean.java | {
"start": 863,
"end": 1337
} | interface ____ {
/**
* Gets the version of Hadoop.
*
* @return the version of Hadoop
*/
String getVersion();
/**
* Get the version of software running on the Balancer.
*
* @return a string representing the version.
*/
String getSoftwareVersion();
/**
* Get the compilation information which contains date, user and branch.
*
* @return the compilation information, as a JSON string.
*/
String getCompileInfo();
}
| BalancerMXBean |
java | apache__kafka | raft/src/main/java/org/apache/kafka/raft/BatchReader.java | {
"start": 902,
"end": 1419
} | interface ____ used to send committed data from the {@link RaftClient}
* down to registered {@link RaftClient.Listener} instances.
*
* The advantage of hiding the consumption of committed batches behind an interface
* is that it allows us to push blocking operations such as reads from disk outside
* of the Raft IO thread. This helps to ensure that a slow state machine will not
* affect replication.
*
* @param <T> record type (see {@link org.apache.kafka.server.common.serialization.RecordSerde})
*/
public | is |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/search/DummyQueryParserPlugin.java | {
"start": 918,
"end": 1446
} | class ____ extends Plugin implements SearchPlugin {
@Override
public List<QuerySpec<?>> getQueries() {
return List.of(
new QuerySpec<>(DummyQueryBuilder.NAME, DummyQueryBuilder::new, DummyQueryBuilder::fromXContent),
new QuerySpec<>(
FailBeforeCurrentVersionQueryBuilder.NAME,
FailBeforeCurrentVersionQueryBuilder::new,
FailBeforeCurrentVersionQueryBuilder::fromXContent
)
);
}
public static | DummyQueryParserPlugin |
java | apache__kafka | storage/src/main/java/org/apache/kafka/storage/internals/log/LazyIndex.java | {
"start": 1792,
"end": 1923
} | class ____ thread safe. Make sure to check `AbstractIndex` subclasses
* documentation to establish their thread safety.
*/
public | are |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java | {
"start": 96125,
"end": 99708
} | interface ____ the machine running the tests. The tests run concurrently in multiple
* JVMs, but all have access to the same network, so there's a risk that different tests will interact with each other in unexpected
* ways and trigger spurious failures. Gradle numbers its workers sequentially starting at 1 and each worker can determine its own
* identity from the {@link #TEST_WORKER_SYS_PROPERTY} system property. We use this to try and assign disjoint port ranges to each test
* worker, avoiding any unexpected interactions, although if we spawn enough test workers then we will wrap around to the beginning
* again.
*/
/**
* Defines the size of the port range assigned to each worker, which must be large enough to supply enough ports to run the tests, but
* not so large that we run out of ports. See also [NOTE: Port ranges for tests].
*/
private static final int PORTS_PER_WORKER = 30;
/**
* Defines the minimum port that test workers should use. See also [NOTE: Port ranges for tests].
*/
protected static final int MIN_PRIVATE_PORT = 13301;
/**
* Defines the maximum port that test workers should use. See also [NOTE: Port ranges for tests].
*/
private static final int MAX_PRIVATE_PORT = 32767;
/**
* Wrap around after reaching this worker ID.
*/
private static final int MAX_EFFECTIVE_WORKER_ID = (MAX_PRIVATE_PORT - MIN_PRIVATE_PORT - PORTS_PER_WORKER + 1) / PORTS_PER_WORKER - 1;
static {
assert getWorkerBasePort(MAX_EFFECTIVE_WORKER_ID) + PORTS_PER_WORKER - 1 <= MAX_PRIVATE_PORT;
}
/**
* Returns a port range for this JVM according to its Gradle worker ID. See also [NOTE: Port ranges for tests].
*/
public static String getPortRange() {
final var firstPort = getWorkerBasePort();
final var lastPort = firstPort + PORTS_PER_WORKER - 1; // upper bound is inclusive
assert MIN_PRIVATE_PORT <= firstPort && lastPort <= MAX_PRIVATE_PORT;
return firstPort + "-" + lastPort;
}
/**
* Returns the start of the port range for this JVM according to its Gradle worker ID. See also [NOTE: Port ranges for tests].
*/
protected static int getWorkerBasePort() {
final var workerIdStr = System.getProperty(ESTestCase.TEST_WORKER_SYS_PROPERTY);
if (workerIdStr == null) {
// running in IDE
return MIN_PRIVATE_PORT;
}
final var workerId = Integer.parseInt(workerIdStr);
assert workerId >= 1 : "Non positive gradle worker id: " + workerIdStr;
return getWorkerBasePort(workerId % (MAX_EFFECTIVE_WORKER_ID + 1));
}
private static int getWorkerBasePort(int effectiveWorkerId) {
assert 0 <= effectiveWorkerId && effectiveWorkerId <= MAX_EFFECTIVE_WORKER_ID;
// the range [MIN_PRIVATE_PORT, MIN_PRIVATE_PORT+PORTS_PER_WORKER) is only for running outside of Gradle
return MIN_PRIVATE_PORT + PORTS_PER_WORKER + effectiveWorkerId * PORTS_PER_WORKER;
}
public static InetAddress randomIp(boolean v4) {
try {
if (v4) {
byte[] ipv4 = new byte[4];
random().nextBytes(ipv4);
return InetAddress.getByAddress(ipv4);
} else {
byte[] ipv6 = new byte[16];
random().nextBytes(ipv6);
return InetAddress.getByAddress(ipv6);
}
} catch (UnknownHostException e) {
throw new AssertionError();
}
}
public static final | of |
java | netty__netty | resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifeCycleObserverFactory.java | {
"start": 2469,
"end": 3666
} | class ____ for the logger to use.
* @param level The log level to use for logging resolver events.
*/
public LoggingDnsQueryLifeCycleObserverFactory(Class<?> classContext, LogLevel level) {
this.level = checkAndConvertLevel(level);
logger = InternalLoggerFactory.getInstance(checkNotNull(classContext, "classContext"));
}
/**
* Create {@link DnsQueryLifecycleObserver} instances that log events to a logger with the given name context,
* at the given log level.
* @param name The name for the logger to use.
* @param level The log level to use for logging resolver events.
*/
public LoggingDnsQueryLifeCycleObserverFactory(String name, LogLevel level) {
this.level = checkAndConvertLevel(level);
logger = InternalLoggerFactory.getInstance(checkNotNull(name, "name"));
}
private static InternalLogLevel checkAndConvertLevel(LogLevel level) {
return checkNotNull(level, "level").toInternalLevel();
}
@Override
public DnsQueryLifecycleObserver newDnsQueryLifecycleObserver(DnsQuestion question) {
return new LoggingDnsQueryLifecycleObserver(question, logger, level);
}
}
| context |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/EffectivelyPrivateTest.java | {
"start": 1858,
"end": 2032
} | class ____ implements Runnable {
void foo() {}
@Override
public void run() {}
}
""")
.doTest();
}
}
| T |
java | grpc__grpc-java | stub/src/main/java/io/grpc/stub/ServerCalls.java | {
"start": 8492,
"end": 8834
} | class ____ extends ServerCall.Listener<ReqT> {
private final StreamObserver<ReqT> requestObserver;
private final ServerCallStreamObserverImpl<ReqT, RespT> responseObserver;
private final ServerCall<ReqT, RespT> call;
private boolean halfClosed = false;
// Non private to avoid synthetic | StreamingServerCallListener |
java | square__javapoet | src/main/java/com/squareup/javapoet/CodeBlock.java | {
"start": 1440,
"end": 3171
} | class ____ dollar sign {@code $} and has
* its own set of permitted placeholders:
*
* <ul>
* <li>{@code $L} emits a <em>literal</em> value with no escaping. Arguments for literals may be
* strings, primitives, {@linkplain TypeSpec type declarations}, {@linkplain AnnotationSpec
* annotations} and even other code blocks.
* <li>{@code $N} emits a <em>name</em>, using name collision avoidance where necessary. Arguments
* for names may be strings (actually any {@linkplain CharSequence character sequence}),
* {@linkplain ParameterSpec parameters}, {@linkplain FieldSpec fields}, {@linkplain
* MethodSpec methods}, and {@linkplain TypeSpec types}.
* <li>{@code $S} escapes the value as a <em>string</em>, wraps it with double quotes, and emits
* that. For example, {@code 6" sandwich} is emitted {@code "6\" sandwich"}.
* <li>{@code $T} emits a <em>type</em> reference. Types will be imported if possible. Arguments
* for types may be {@linkplain Class classes}, {@linkplain javax.lang.model.type.TypeMirror
,* type mirrors}, and {@linkplain javax.lang.model.element.Element elements}.
* <li>{@code $$} emits a dollar sign.
* <li>{@code $W} emits a space or a newline, depending on its position on the line. This prefers
* to wrap lines before 100 columns.
* <li>{@code $Z} acts as a zero-width space. This prefers to wrap lines before 100 columns.
* <li>{@code $>} increases the indentation level.
* <li>{@code $<} decreases the indentation level.
* <li>{@code $[} begins a statement. For multiline statements, every line after the first line
* is double-indented.
* <li>{@code $]} ends a statement.
* </ul>
*/
public final | uses |
java | quarkusio__quarkus | extensions/arc/runtime/src/main/java/io/quarkus/arc/runtime/ConfigRecorder.java | {
"start": 4839,
"end": 6556
} | class ____ {
private String name;
private String rawTypeName;
private List<String> actualTypeArgumentNames;
private String defaultValue;
@RecordableConstructor
public ConfigValidationMetadata(final String name, final String rawTypeName, List<String> actualTypeArgumentNames,
final String defaultValue) {
this.name = name;
this.rawTypeName = rawTypeName;
this.actualTypeArgumentNames = actualTypeArgumentNames;
this.defaultValue = defaultValue;
}
public String getName() {
return name;
}
public void setName(final String name) {
this.name = name;
}
public String getRawTypeName() {
return rawTypeName;
}
public List<String> getActualTypeArgumentNames() {
return actualTypeArgumentNames;
}
public String getDefaultValue() {
return defaultValue;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
ConfigValidationMetadata that = (ConfigValidationMetadata) o;
return name.equals(that.name) && rawTypeName.equals(that.rawTypeName)
&& actualTypeArgumentNames.equals(that.actualTypeArgumentNames)
&& Objects.equals(defaultValue, that.defaultValue);
}
@Override
public int hashCode() {
return Objects.hash(name, rawTypeName, actualTypeArgumentNames, defaultValue);
}
}
}
| ConfigValidationMetadata |
java | elastic__elasticsearch | x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndexTests.java | {
"start": 2501,
"end": 16782
} | class ____ extends ESTestCase {
private ClusterState stateWithLatestVersionedIndex;
public static ClusterState randomTransformClusterState() {
return randomTransformClusterState(true);
}
public static ClusterState randomTransformClusterState(boolean shardsReady) {
String uuid = UUIDs.randomBase64UUID();
Map<String, IndexMetadata> indexMapBuilder = new HashMap<>();
try {
IndexMetadata.Builder builder = new IndexMetadata.Builder(TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME).settings(
Settings.builder()
.put(TransformInternalIndex.settings(Settings.builder().put(IndexMetadata.SETTING_INDEX_UUID, uuid).build()))
.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
.build()
).numberOfReplicas(0).numberOfShards(1).putMapping(Strings.toString(TransformInternalIndex.mappings()));
indexMapBuilder.put(TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME, builder.build());
} catch (IOException e) {
throw new UncheckedIOException(e);
}
Metadata.Builder metaBuilder = Metadata.builder();
metaBuilder.indices(indexMapBuilder);
ClusterState.Builder csBuilder = ClusterState.builder(ClusterName.DEFAULT);
csBuilder.metadata(metaBuilder.build());
final var index = new Index(TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME, uuid);
csBuilder.routingTable(
RoutingTable.builder()
.add(
IndexRoutingTable.builder(index)
.addShard(
TestShardRouting.newShardRouting(
new ShardId(index, 0),
"node_a",
null,
true,
shardsReady ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING
)
)
.build()
)
.build()
);
return csBuilder.build();
}
@Before
public void setupClusterStates() {
stateWithLatestVersionedIndex = randomTransformClusterState();
}
public void testHaveLatestVersionedIndexTemplate() {
assertTrue(TransformInternalIndex.hasLatestVersionedIndex(stateWithLatestVersionedIndex));
assertTrue(TransformInternalIndex.allPrimaryShardsActiveForLatestVersionedIndex(stateWithLatestVersionedIndex));
assertFalse(TransformInternalIndex.hasLatestVersionedIndex(ClusterState.EMPTY_STATE));
assertFalse(TransformInternalIndex.allPrimaryShardsActiveForLatestVersionedIndex(ClusterState.EMPTY_STATE));
assertFalse(TransformInternalIndex.allPrimaryShardsActiveForLatestVersionedIndex(randomTransformClusterState(false)));
}
public void testCreateLatestVersionedIndexIfRequired_GivenNotRequired() {
ClusterService clusterService = mock(ClusterService.class);
when(clusterService.state()).thenReturn(stateWithLatestVersionedIndex);
Client client = mock(Client.class);
AtomicBoolean gotResponse = new AtomicBoolean(false);
ActionListener<Void> testListener = ActionTestUtils.assertNoFailureListener(aVoid -> gotResponse.set(true));
TransformInternalIndex.createLatestVersionedIndexIfRequired(clusterService, client, Settings.EMPTY, testListener);
assertTrue(gotResponse.get());
verifyNoMoreInteractions(client);
}
public void testCreateLatestVersionedIndexIfRequired_GivenRequired() {
ClusterService clusterService = mock(ClusterService.class);
when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE);
IndicesAdminClient indicesClient = mock(IndicesAdminClient.class);
doAnswer(invocationOnMock -> {
@SuppressWarnings("unchecked")
ActionListener<CreateIndexResponse> listener = (ActionListener<CreateIndexResponse>) invocationOnMock.getArguments()[1];
listener.onResponse(new CreateIndexResponse(true, true, TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME));
return null;
}).when(indicesClient).create(any(), any());
AdminClient adminClient = mock(AdminClient.class);
when(adminClient.indices()).thenReturn(indicesClient);
Client client = mock(Client.class);
when(client.admin()).thenReturn(adminClient);
ThreadPool threadPool = mock(ThreadPool.class);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
when(client.threadPool()).thenReturn(threadPool);
AtomicBoolean gotResponse = new AtomicBoolean(false);
ActionListener<Void> testListener = ActionTestUtils.assertNoFailureListener(aVoid -> gotResponse.set(true));
TransformInternalIndex.createLatestVersionedIndexIfRequired(clusterService, client, Settings.EMPTY, testListener);
assertTrue(gotResponse.get());
verify(client, times(1)).threadPool();
verify(client, times(1)).admin();
verifyNoMoreInteractions(client);
verify(adminClient, times(1)).indices();
verifyNoMoreInteractions(adminClient);
verify(indicesClient, times(1)).create(any(), any());
verifyNoMoreInteractions(indicesClient);
}
public void testCreateLatestVersionedIndexIfRequired_GivenShardInitializationPending() {
ClusterService clusterService = mock(ClusterService.class);
when(clusterService.state()).thenReturn(randomTransformClusterState(false));
ClusterAdminClient clusterClient = mock(ClusterAdminClient.class);
doAnswer(invocationOnMock -> {
@SuppressWarnings("unchecked")
ActionListener<ClusterHealthResponse> listener = (ActionListener<ClusterHealthResponse>) invocationOnMock.getArguments()[1];
listener.onResponse(new ClusterHealthResponse());
return null;
}).when(clusterClient).health(any(), any());
AdminClient adminClient = mock(AdminClient.class);
when(adminClient.cluster()).thenReturn(clusterClient);
Client client = mock(Client.class);
when(client.admin()).thenReturn(adminClient);
ThreadPool threadPool = mock(ThreadPool.class);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
when(client.threadPool()).thenReturn(threadPool);
AtomicBoolean gotResponse = new AtomicBoolean(false);
ActionListener<Void> testListener = ActionTestUtils.assertNoFailureListener(aVoid -> gotResponse.set(true));
TransformInternalIndex.createLatestVersionedIndexIfRequired(clusterService, client, Settings.EMPTY, testListener);
assertTrue(gotResponse.get());
verify(client, times(1)).threadPool();
verify(client, times(1)).admin();
verifyNoMoreInteractions(client);
verify(adminClient, times(1)).cluster();
verifyNoMoreInteractions(adminClient);
verify(clusterClient, times(1)).health(any(), any());
verifyNoMoreInteractions(clusterClient);
}
public void testCreateLatestVersionedIndexIfRequired_GivenConcurrentCreation() {
// simulate the case that 1st the index does not exist, but got created and allocated meanwhile
ClusterService clusterService = mock(ClusterService.class);
when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE).thenReturn(stateWithLatestVersionedIndex);
IndicesAdminClient indicesClient = mock(IndicesAdminClient.class);
doAnswer(invocationOnMock -> {
@SuppressWarnings("unchecked")
ActionListener<CreateIndexResponse> listener = (ActionListener<CreateIndexResponse>) invocationOnMock.getArguments()[1];
listener.onFailure(new ResourceAlreadyExistsException(TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME));
return null;
}).when(indicesClient).create(any(), any());
AdminClient adminClient = mock(AdminClient.class);
when(adminClient.indices()).thenReturn(indicesClient);
Client client = mock(Client.class);
when(client.admin()).thenReturn(adminClient);
ThreadPool threadPool = mock(ThreadPool.class);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
when(client.threadPool()).thenReturn(threadPool);
AtomicBoolean gotResponse = new AtomicBoolean(false);
ActionListener<Void> testListener = ActionTestUtils.assertNoFailureListener(aVoid -> gotResponse.set(true));
TransformInternalIndex.createLatestVersionedIndexIfRequired(clusterService, client, Settings.EMPTY, testListener);
assertTrue(gotResponse.get());
verify(client, times(1)).threadPool();
verify(client, times(1)).admin();
verifyNoMoreInteractions(client);
verify(adminClient, times(1)).indices();
verifyNoMoreInteractions(adminClient);
verify(indicesClient, times(1)).create(any(), any());
verifyNoMoreInteractions(indicesClient);
}
public void testCreateLatestVersionedIndexIfRequired_GivenConcurrentCreationShardInitializationPending() {
// simulate the case that 1st the index does not exist, but got created, however allocation is pending
ClusterService clusterService = mock(ClusterService.class);
when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE).thenReturn(randomTransformClusterState(false));
IndicesAdminClient indicesClient = mock(IndicesAdminClient.class);
doAnswer(invocationOnMock -> {
@SuppressWarnings("unchecked")
ActionListener<CreateIndexResponse> listener = (ActionListener<CreateIndexResponse>) invocationOnMock.getArguments()[1];
listener.onFailure(new ResourceAlreadyExistsException(TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME));
return null;
}).when(indicesClient).create(any(), any());
ClusterAdminClient clusterClient = mock(ClusterAdminClient.class);
doAnswer(invocationOnMock -> {
@SuppressWarnings("unchecked")
ActionListener<ClusterHealthResponse> listener = (ActionListener<ClusterHealthResponse>) invocationOnMock.getArguments()[1];
listener.onResponse(new ClusterHealthResponse());
return null;
}).when(clusterClient).health(any(), any());
AdminClient adminClient = mock(AdminClient.class);
when(adminClient.indices()).thenReturn(indicesClient);
when(adminClient.cluster()).thenReturn(clusterClient);
Client client = mock(Client.class);
when(client.admin()).thenReturn(adminClient);
ThreadPool threadPool = mock(ThreadPool.class);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
when(client.threadPool()).thenReturn(threadPool);
AtomicBoolean gotResponse = new AtomicBoolean(false);
ActionListener<Void> testListener = ActionTestUtils.assertNoFailureListener(aVoid -> gotResponse.set(true));
TransformInternalIndex.createLatestVersionedIndexIfRequired(clusterService, client, Settings.EMPTY, testListener);
assertTrue(gotResponse.get());
verify(client, times(2)).threadPool();
verify(client, times(2)).admin();
verifyNoMoreInteractions(client);
verify(adminClient, times(1)).indices();
verify(adminClient, times(1)).cluster();
verifyNoMoreInteractions(adminClient);
verify(indicesClient, times(1)).create(any(), any());
verifyNoMoreInteractions(indicesClient);
verify(clusterClient, times(1)).health(any(), any());
verifyNoMoreInteractions(clusterClient);
}
public void testEnsureLatestIndexAndTemplateInstalled_GivenRequired() {
ClusterService clusterService = mock(ClusterService.class);
when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE);
IndicesAdminClient indicesClient = mock(IndicesAdminClient.class);
doAnswer(invocationOnMock -> {
@SuppressWarnings("unchecked")
ActionListener<CreateIndexResponse> listener = (ActionListener<CreateIndexResponse>) invocationOnMock.getArguments()[1];
listener.onResponse(new CreateIndexResponse(true, true, TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME));
return null;
}).when(indicesClient).create(any(), any());
AdminClient adminClient = mock(AdminClient.class);
when(adminClient.indices()).thenReturn(indicesClient);
Client client = mock(Client.class);
when(client.admin()).thenReturn(adminClient);
ThreadPool threadPool = mock(ThreadPool.class);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
when(client.threadPool()).thenReturn(threadPool);
AtomicBoolean gotResponse = new AtomicBoolean(false);
ActionListener<Void> testListener = ActionTestUtils.assertNoFailureListener(aVoid -> gotResponse.set(true));
TransformInternalIndex.createLatestVersionedIndexIfRequired(clusterService, client, Settings.EMPTY, testListener);
assertTrue(gotResponse.get());
verify(client, times(1)).threadPool();
verify(client, times(1)).admin();
verifyNoMoreInteractions(client);
verify(adminClient, times(1)).indices();
verifyNoMoreInteractions(adminClient);
verify(indicesClient, times(1)).create(any(), any());
verifyNoMoreInteractions(indicesClient);
}
public void testSettings() {
Settings settings = TransformInternalIndex.settings(Settings.EMPTY);
assertThat(settings.get(IndexSettings.INDEX_FAST_REFRESH_SETTING.getKey()), is(nullValue()));
settings = TransformInternalIndex.settings(Settings.builder().put(IndexSettings.INDEX_FAST_REFRESH_SETTING.getKey(), true).build());
assertThat(settings.getAsBoolean(IndexSettings.INDEX_FAST_REFRESH_SETTING.getKey(), false), is(true));
}
}
| TransformInternalIndexTests |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java | {
"start": 7868,
"end": 8323
} | class ____ use to cache history data.
*/
public static final String MR_HISTORY_STORAGE =
MR_HISTORY_PREFIX + "store.class";
/**
* Enable the history server to store server state and recover server state
* upon startup.
*/
public static final String MR_HS_RECOVERY_ENABLE =
MR_HISTORY_PREFIX + "recovery.enable";
public static final boolean DEFAULT_MR_HS_RECOVERY_ENABLE = false;
/**
* The HistoryServerStateStoreService | to |
java | resilience4j__resilience4j | resilience4j-commons-configuration/src/test/java/io/github/resilience4j/commons/configuration/dummy/DummyPredicateThrowable.java | {
"start": 755,
"end": 999
} | class ____ implements Predicate<Throwable> {
@Override
public boolean test(Throwable throwable) {
return throwable instanceof IOException || throwable instanceof DummyIgnoredException;
}
}
| DummyPredicateThrowable |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java | {
"start": 11510,
"end": 17885
} | class ____ extends CacheLoader<String, Set<String>> {
private ListeningExecutorService executorService;
GroupCacheLoader() {
if (reloadGroupsInBackground) {
ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setNameFormat("Group-Cache-Reload")
.setDaemon(true)
.build();
// With coreThreadCount == maxThreadCount we effectively
// create a fixed size thread pool. As allowCoreThreadTimeOut
// has been set, all threads will die after 60 seconds of non use
ThreadPoolExecutor parentExecutor = new ThreadPoolExecutor(
reloadGroupsThreadCount,
reloadGroupsThreadCount,
60,
TimeUnit.SECONDS,
new LinkedBlockingQueue<>(),
threadFactory);
parentExecutor.allowCoreThreadTimeOut(true);
executorService = MoreExecutors.listeningDecorator(parentExecutor);
}
}
/**
* This method will block if a cache entry doesn't exist, and
* any subsequent requests for the same user will wait on this
* request to return. If a user already exists in the cache,
* and when the key expires, the first call to reload the key
* will block, but subsequent requests will return the old
* value until the blocking thread returns.
* If reloadGroupsInBackground is true, then the thread that
* needs to refresh an expired key will not block either. Instead
* it will return the old cache value and schedule a background
* refresh
* @param user key of cache
* @return List of groups belonging to user
* @throws IOException to prevent caching negative entries
*/
@Override
public Set<String> load(String user) throws Exception {
LOG.debug("GroupCacheLoader - load.");
TraceScope scope = null;
Tracer tracer = Tracer.curThreadTracer();
if (tracer != null) {
scope = tracer.newScope("Groups#fetchGroupList");
scope.addKVAnnotation("user", user);
}
Set<String> groups = null;
try {
groups = fetchGroupSet(user);
} finally {
if (scope != null) {
scope.close();
}
}
if (groups.isEmpty()) {
if (isNegativeCacheEnabled()) {
negativeCache.add(user);
}
// We throw here to prevent Cache from retaining an empty group
throw noGroupsForUser(user);
}
return groups;
}
/**
* Override the reload method to provide an asynchronous implementation. If
* reloadGroupsInBackground is false, then this method defers to the super
* implementation, otherwise is arranges for the cache to be updated later
*/
@Override
public ListenableFuture<Set<String>> reload(final String key,
Set<String> oldValue)
throws Exception {
LOG.debug("GroupCacheLoader - reload (async).");
if (!reloadGroupsInBackground) {
return super.reload(key, oldValue);
}
backgroundRefreshQueued.incrementAndGet();
ListenableFuture<Set<String>> listenableFuture =
executorService.submit(() -> {
backgroundRefreshQueued.decrementAndGet();
backgroundRefreshRunning.incrementAndGet();
Set<String> results = load(key);
return results;
});
Futures.addCallback(listenableFuture, new FutureCallback<Set<String>>() {
@Override
public void onSuccess(Set<String> result) {
backgroundRefreshSuccess.incrementAndGet();
backgroundRefreshRunning.decrementAndGet();
}
@Override
public void onFailure(Throwable t) {
backgroundRefreshException.incrementAndGet();
backgroundRefreshRunning.decrementAndGet();
}
}, MoreExecutors.directExecutor());
return listenableFuture;
}
/**
* Queries impl for groups belonging to the user.
* This could involve I/O and take awhile.
*/
private Set<String> fetchGroupSet(String user) throws IOException {
long startMs = timer.monotonicNow();
Set<String> groups = impl.getGroupsSet(user);
long endMs = timer.monotonicNow();
long deltaMs = endMs - startMs ;
UserGroupInformation.metrics.addGetGroups(deltaMs);
if (deltaMs > warningDeltaMs) {
LOG.warn("Potential performance problem: getGroups(user=" + user +") " +
"took " + deltaMs + " milliseconds.");
}
return groups;
}
}
/**
* Refresh all user-to-groups mappings.
*/
public void refresh() {
LOG.info("clearing userToGroupsMap cache");
try {
impl.cacheGroupsRefresh();
} catch (IOException e) {
LOG.warn("Error refreshing groups cache", e);
}
cache.invalidateAll();
if(isNegativeCacheEnabled()) {
negativeCache.clear();
}
}
/**
* Add groups to cache
*
* @param groups list of groups to add to cache
*/
public void cacheGroupsAdd(List<String> groups) {
try {
impl.cacheGroupsAdd(groups);
} catch (IOException e) {
LOG.warn("Error caching groups", e);
}
}
private static Groups GROUPS = null;
/**
* Get the groups being used to map user-to-groups.
* @return the groups being used to map user-to-groups.
*/
public static Groups getUserToGroupsMappingService() {
return getUserToGroupsMappingService(new Configuration());
}
/**
* Get the groups being used to map user-to-groups.
* @param conf configuration.
* @return the groups being used to map user-to-groups.
*/
public static synchronized Groups getUserToGroupsMappingService(
Configuration conf) {
if(GROUPS == null) {
if(LOG.isDebugEnabled()) {
LOG.debug(" Creating new Groups object");
}
GROUPS = new Groups(conf);
}
return GROUPS;
}
/**
* Create new groups used to map user-to-groups with loaded configuration.
* @param conf configuration.
* @return the groups being used to map user-to-groups.
*/
@Private
public static synchronized Groups
getUserToGroupsMappingServiceWithLoadedConfiguration(
Configuration conf) {
GROUPS = new Groups(conf);
return GROUPS;
}
@VisibleForTesting
public static void reset() {
GROUPS = null;
}
}
| GroupCacheLoader |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java | {
"start": 122587,
"end": 125179
} | class ____ extends ParserRuleContext {
public TerminalNode RENAME() { return getToken(EsqlBaseParser.RENAME, 0); }
public List<RenameClauseContext> renameClause() {
return getRuleContexts(RenameClauseContext.class);
}
public RenameClauseContext renameClause(int i) {
return getRuleContext(RenameClauseContext.class,i);
}
public List<TerminalNode> COMMA() { return getTokens(EsqlBaseParser.COMMA); }
public TerminalNode COMMA(int i) {
return getToken(EsqlBaseParser.COMMA, i);
}
@SuppressWarnings("this-escape")
public RenameCommandContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_renameCommand; }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterRenameCommand(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitRenameCommand(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor<? extends T>)visitor).visitRenameCommand(this);
else return visitor.visitChildren(this);
}
}
public final RenameCommandContext renameCommand() throws RecognitionException {
RenameCommandContext _localctx = new RenameCommandContext(_ctx, getState());
enterRule(_localctx, 86, RULE_renameCommand);
try {
int _alt;
enterOuterAlt(_localctx, 1);
{
setState(478);
match(RENAME);
setState(479);
renameClause();
setState(484);
_errHandler.sync(this);
_alt = getInterpreter().adaptivePredict(_input,33,_ctx);
while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
if ( _alt==1 ) {
{
{
setState(480);
match(COMMA);
setState(481);
renameClause();
}
}
}
setState(486);
_errHandler.sync(this);
_alt = getInterpreter().adaptivePredict(_input,33,_ctx);
}
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
exitRule();
}
return _localctx;
}
@SuppressWarnings("CheckReturnValue")
public static | RenameCommandContext |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/fields/RecursiveComparisonAssert_isEqualTo_ignoringArrayOrder_Test.java | {
"start": 10882,
"end": 11939
} | class ____ {
Data[] field1;
Data[] field2;
@Override
public String toString() {
return "DataStore[field1=%s, field2=%s]".formatted(field1, field2);
}
}
record Data(String text) {
}
private static DataStore createDataStore(Data d1, Data d2) {
DataStore dataStore = new DataStore();
dataStore.field1 = array(d1, d2);
dataStore.field2 = array(d1, d2);
return dataStore;
}
@Test
void evaluating_visited_dual_values_should_check_location() {
// GIVEN
Data d1 = new Data("111");
Data d2 = new Data("222");
DataStore dataStore1 = createDataStore(d1, d2);
DataStore dataStore2 = createDataStore(d2, d1);
// WHEN/THEN
then(dataStore1).usingRecursiveComparison(recursiveComparisonConfiguration)
.withEqualsForType((data1, data2) -> data1.text.equals(data2.text), Data.class)
.ignoringArrayOrder()
.isEqualTo(dataStore2);
}
// related to https://github.com/assertj/assertj/issues/3598 but for arrays
static | DataStore |
java | spring-projects__spring-boot | module/spring-boot-webmvc/src/main/java/org/springframework/boot/webmvc/actuate/endpoint/web/AbstractWebMvcEndpointHandlerMapping.java | {
"start": 17645,
"end": 17955
} | class ____ implements Function<Object, Object> {
@Override
public Object apply(Object body) {
if (!(body instanceof Flux)) {
return body;
}
return ((Flux<?>) body).collectList();
}
}
}
/**
* Handler for a {@link ServletWebOperation}.
*/
private static final | FluxBodyConverter |
java | quarkusio__quarkus | independent-projects/junit5-virtual-threads/src/test/java/io/quarkus/test/junit5/virtual/internal/ignore/LoomUnitExampleShouldPinOnSuperClassTest.java | {
"start": 375,
"end": 559
} | class ____
public void failWhenShouldNotPinAndPinDetected() {
TestPinJfrEvent.pin();
}
@Test
public void failWhenShouldPinAndNoPinDetected() {
}
}
| annotation |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RMapCacheNativeReactive.java | {
"start": 1132,
"end": 13360
} | interface ____<K, V> extends RMapReactive<K, V>, RDestroyable {
/**
* Stores value mapped by key with specified time to live.
* Entry expires after specified time to live.
* <p>
* If the map previously contained a mapping for
* the key, the old value is replaced by the specified value.
*
* @param key - map key
* @param value - map value
* @param ttl - time to live for key\value entry.
* If <code>0</code> then stores infinitely.
* @return previous associated value
*/
Mono<V> put(K key, V value, Duration ttl);
/**
* Stores value mapped by key with specified time to live.
* Entry expires after specified time to live.
* <p>
* If the map previously contained a mapping for
* the key, the old value is replaced by the specified value.
* <p>
* Works faster than usual {@link #put(Object, Object, Duration)}
* as it not returns previous value.
*
* @param key - map key
* @param value - map value
* @param ttl - time to live for key\value entry.
* If <code>0</code> then stores infinitely.
*
* @return <code>true</code> if key is a new key in the hash and value was set.
* <code>false</code> if key already exists in the hash and the value was updated.
*/
Mono<Boolean> fastPut(K key, V value, Duration ttl);
/**
* If the specified key is not already associated
* with a value, associate it with the given value.
* <p>
* Stores value mapped by key with specified time to live.
* Entry expires after specified time to live.
*
* @param key - map key
* @param value - map value
* @param ttl - time to live for key\value entry.
* If <code>0</code> then stores infinitely.
*
* @return current associated value
*/
Mono<V> putIfAbsent(K key, V value, Duration ttl);
/**
* If the specified key is not already associated
* with a value, associate it with the given value.
* <p>
* Stores value mapped by key with specified time to live.
* Entry expires after specified time to live.
* <p>
* Works faster than usual {@link #putIfAbsent(Object, Object, Duration)}
* as it not returns previous value.
*
* @param key - map key
* @param value - map value
* @param ttl - time to live for key\value entry.
* If <code>0</code> then stores infinitely.
*
* @return <code>true</code> if key is a new key in the hash and value was set.
* <code>false</code> if key already exists in the hash
*/
Mono<Boolean> fastPutIfAbsent(K key, V value, Duration ttl);
/**
* Remaining time to live of map entry associated with a <code>key</code>.
*
* @param key map key
* @return time in milliseconds
* -2 if the key does not exist.
* -1 if the key exists but has no associated expire.
*/
Mono<Long> remainTimeToLive(K key);
/**
* Remaining time to live of map entries associated with <code>keys</code>.
*
* @param keys map keys
* @return Time to live mapped by key.
* Time in milliseconds
* -2 if the key does not exist.
* -1 if the key exists but has no associated expire.
*/
Mono<Map<K, Long>> remainTimeToLive(Set<K> keys);
/**
* Associates the specified <code>value</code> with the specified <code>key</code>
* in batch.
* <p>
* If {@link MapWriter} is defined then new map entries will be stored in write-through mode.
*
* @param map - mappings to be stored in this map
* @param ttl - time to live for all key\value entries.
* If <code>0</code> then stores infinitely.
*/
Mono<Void> putAll(java.util.Map<? extends K, ? extends V> map, Duration ttl);
/**
* Clears an expiration timeout or date of specified entry by key.
*
* @param key map key
* @return <code>true</code> if timeout was removed
* <code>false</code> if entry does not have an associated timeout
* <code>null</code> if entry does not exist
*/
Mono<Boolean> clearExpire(K key);
/**
* Clears an expiration timeout or date of specified entries by keys.
*
* @param keys map keys
* @return Boolean mapped by key.
* <code>true</code> if timeout was removed
* <code>false</code> if entry does not have an associated timeout
* <code>null</code> if entry does not exist
*/
Mono<Map<K, Boolean>> clearExpire(Set<K> keys);
/**
* Updates time to live of specified entry by key.
* Entry expires when specified time to live was reached.
* <p>
* Returns <code>false</code> if entry already expired or doesn't exist,
* otherwise returns <code>true</code>.
*
* @param key map key
* @param ttl time to live for key\value entry.
* If <code>0</code> then time to live doesn't affect entry expiration.
* <p>
* if <code>ttl</code> params are equal to <code>0</code>
* then entry stores infinitely.
*
* @return returns <code>false</code> if entry already expired or doesn't exist,
* otherwise returns <code>true</code>.
*/
Mono<Boolean> expireEntry(K key, Duration ttl);
/**
* Sets time to live of specified entry by key.
* If these parameters weren't set before.
* Entry expires when specified time to live was reached.
* <p>
* Returns <code>false</code> if entry already has expiration time or doesn't exist,
* otherwise returns <code>true</code>.
*
* @param key map key
* @param ttl time to live for key\value entry.
* If <code>0</code> then time to live doesn't affect entry expiration.
* <p>
* if <code>ttl</code> params are equal to <code>0</code>
* then entry stores infinitely.
*
* @return returns <code>false</code> if entry already has expiration time or doesn't exist,
* otherwise returns <code>true</code>.
*/
Mono<Boolean> expireEntryIfNotSet(K key, Duration ttl);
/**
* Sets time to live of specified entry by key only if it's greater than timeout set before.
* Entry expires when specified time to live was reached.
* <p>
* Returns <code>false</code> if entry already has expiration time or doesn't exist,
* otherwise returns <code>true</code>.
*
* @param key map key
* @param ttl time to live for key\value entry.
* If <code>0</code> then time to live doesn't affect entry expiration.
* <p>
* if <code>ttl</code> params are equal to <code>0</code>
* then entry stores infinitely.
*
* @return returns <code>false</code> if entry already has expiration time or doesn't exist,
* otherwise returns <code>true</code>.
*/
Mono<Boolean> expireEntryIfGreater(K key, Duration ttl);
/**
* Sets time to live of specified entry by key only if it's less than timeout set before.
* Entry expires when specified time to live was reached.
* <p>
* Returns <code>false</code> if entry already has expiration time or doesn't exist,
* otherwise returns <code>true</code>.
*
* @param key map key
* @param ttl time to live for key\value entry.
* If <code>0</code> then time to live doesn't affect entry expiration.
* <p>
* if <code>ttl</code> params are equal to <code>0</code>
* then entry stores infinitely.
*
* @return returns <code>false</code> if entry already has expiration time or doesn't exist,
* otherwise returns <code>true</code>.
*/
Mono<Boolean> expireEntryIfLess(K key, Duration ttl);
/**
* Updates time to live of specified entries by keys.
* Entries expires when specified time to live was reached.
* <p>
* Returns amount of updated entries.
*
* @param keys map keys
* @param ttl time to live for key\value entries.
* If <code>0</code> then time to live doesn't affect entry expiration.
* <p>
* if <code>ttl</code> params are equal to <code>0</code>
* then entries are stored infinitely.
*
* @return amount of updated entries.
*/
Mono<Integer> expireEntries(Set<K> keys, Duration ttl);
/**
* Sets time to live of specified entries by keys only if it's greater than timeout set before.
* Entries expire when specified time to live was reached.
* <p>
* Returns amount of updated entries.
*
* @param keys map keys
* @param ttl time to live for key\value entry.
* If <code>0</code> then time to live doesn't affect entry expiration.
* <p>
* if <code>ttl</code> params are equal to <code>0</code>
* then entry stores infinitely.
*
* @return amount of updated entries.
*/
Mono<Integer> expireEntriesIfGreater(Set<K> keys, Duration ttl);
/**
* Sets time to live of specified entries by keys only if it's less than timeout set before.
* Entries expire when specified time to live was reached.
* <p>
* Returns amount of updated entries.
*
* @param keys map keys
* @param ttl time to live for key\value entry.
* If <code>0</code> then time to live doesn't affect entry expiration.
* <p>
* if <code>ttl</code> params are equal to <code>0</code>
* then entry stores infinitely.
*
* @return amount of updated entries.
*/
Mono<Integer> expireEntriesIfLess(Set<K> keys, Duration ttl);
/**
* Sets time to live of specified entries by keys.
* If these parameters weren't set before.
* Entries expire when specified time to live was reached.
* <p>
* Returns amount of updated entries.
*
* @param keys map keys
* @param ttl time to live for key\value entry.
* If <code>0</code> then time to live doesn't affect entry expiration.
* <p>
* if <code>ttl</code> params are equal to <code>0</code>
* then entry stores infinitely.
*
* @return amount of updated entries.
*/
Mono<Integer> expireEntriesIfNotSet(Set<K> keys, Duration ttl);
/**
* Adds object event listener
*
* @see org.redisson.api.listener.TrackingListener
* @see org.redisson.api.listener.MapPutListener
* @see org.redisson.api.listener.MapRemoveListener
* @see org.redisson.api.listener.MapExpiredListener
* @see org.redisson.api.ExpiredObjectListener
* @see org.redisson.api.DeletedObjectListener
*
* @param listener object event listener
* @return listener id
*/
Mono<Integer> addListener(ObjectListener listener);
/**
* If the specified key is not already associated
* with a value, attempts to compute its value using the given mapping function and enters it into this map .
* <p>
* Stores value mapped by key with specified time to live.
* Entry expires after specified time to live.
*
* @param key - map key
* @param ttl - time to live for key\value entry.
* If <code>0</code> then stores infinitely.
* @param mappingFunction the mapping function to compute a value
* @return current associated value
*/
Mono<V> computeIfAbsent(K key, Duration ttl, Function<? super K, ? extends V> mappingFunction);
/**
* Computes a new mapping for the specified key and its current mapped value.
* <p>
* Stores value mapped by key with specified time to live.
* Entry expires after specified time to live.
*
* @param key - map key
* @param ttl - time to live for key\value entry.
* If <code>0</code> then stores infinitely.
* @param remappingFunction - function to compute a value
* @return the new value associated with the specified key, or {@code null} if none
*/
Mono<V> compute(K key, Duration ttl, BiFunction<? super K, ? super V, ? extends V> remappingFunction);
}
| RMapCacheNativeReactive |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/logging/LogLevelTests.java | {
"start": 894,
"end": 2726
} | class ____ {
private Log logger = mock(Log.class);
private Exception exception = new Exception();
@Test
void logWhenTraceLogsAtTrace() {
LogLevel.TRACE.log(this.logger, "test");
LogLevel.TRACE.log(this.logger, "test", this.exception);
then(this.logger).should().trace("test", null);
then(this.logger).should().trace("test", this.exception);
}
@Test
void logWhenDebugLogsAtDebug() {
LogLevel.DEBUG.log(this.logger, "test");
LogLevel.DEBUG.log(this.logger, "test", this.exception);
then(this.logger).should().debug("test", null);
then(this.logger).should().debug("test", this.exception);
}
@Test
void logWhenInfoLogsAtInfo() {
LogLevel.INFO.log(this.logger, "test");
LogLevel.INFO.log(this.logger, "test", this.exception);
then(this.logger).should().info("test", null);
then(this.logger).should().info("test", this.exception);
}
@Test
void logWhenWarnLogsAtWarn() {
LogLevel.WARN.log(this.logger, "test");
LogLevel.WARN.log(this.logger, "test", this.exception);
then(this.logger).should().warn("test", null);
then(this.logger).should().warn("test", this.exception);
}
@Test
void logWhenErrorLogsAtError() {
LogLevel.ERROR.log(this.logger, "test");
LogLevel.ERROR.log(this.logger, "test", this.exception);
then(this.logger).should().error("test", null);
then(this.logger).should().error("test", this.exception);
}
@Test
void logWhenFatalLogsAtFatal() {
LogLevel.FATAL.log(this.logger, "test");
LogLevel.FATAL.log(this.logger, "test", this.exception);
then(this.logger).should().fatal("test", null);
then(this.logger).should().fatal("test", this.exception);
}
@Test
void logWhenOffDoesNotLog() {
LogLevel.OFF.log(this.logger, "test");
LogLevel.OFF.log(this.logger, "test", this.exception);
then(this.logger).shouldHaveNoInteractions();
}
}
| LogLevelTests |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/features/WriteBigDecimalAsPlainTest.java | {
"start": 309,
"end": 1357
} | class ____ extends TestCase {
public void test_for_feature() throws Exception {
BigDecimal value = new BigDecimal("0.00000001");
Assert.assertEquals("1E-8", JSON.toJSONString(value));
Assert.assertEquals("0.00000001", JSON.toJSONString(value, SerializerFeature.WriteBigDecimalAsPlain));
}
public void test_1() throws Exception {
Model m = new Model();
m.value = new BigDecimal("0.00000001");
Assert.assertEquals("{\"value\":1E-8}", JSON.toJSONString(m));
Assert.assertEquals("{\"value\":0.00000001}", JSON.toJSONString(m, SerializerFeature.WriteBigDecimalAsPlain));
}
public void test_for_feature_BigInteger() throws Exception {
BigInteger value = new BigInteger("2020020700826004000000000000");
Assert.assertEquals("2020020700826004000000000000", JSON.toJSONString(value));
Assert.assertEquals("2020020700826004000000000000", JSON.toJSONString(value, SerializerFeature.WriteBigDecimalAsPlain));
}
public static | WriteBigDecimalAsPlainTest |
java | spring-projects__spring-framework | spring-jms/src/main/java/org/springframework/jms/config/AbstractJmsListenerContainerFactory.java | {
"start": 1467,
"end": 8358
} | class ____<C extends AbstractMessageListenerContainer>
implements JmsListenerContainerFactory<C> {
protected final Log logger = LogFactory.getLog(getClass());
private @Nullable ConnectionFactory connectionFactory;
private @Nullable DestinationResolver destinationResolver;
private @Nullable MessageConverter messageConverter;
private @Nullable ExceptionListener exceptionListener;
private @Nullable ErrorHandler errorHandler;
private @Nullable Boolean sessionTransacted;
private @Nullable Integer sessionAcknowledgeMode;
private @Nullable Boolean acknowledgeAfterListener;
private @Nullable Boolean pubSubDomain;
private @Nullable Boolean replyPubSubDomain;
private @Nullable QosSettings replyQosSettings;
private @Nullable Boolean subscriptionDurable;
private @Nullable Boolean subscriptionShared;
private @Nullable String clientId;
private @Nullable Integer phase;
private @Nullable Boolean autoStartup;
private @Nullable ObservationRegistry observationRegistry;
/**
* @see AbstractMessageListenerContainer#setConnectionFactory(ConnectionFactory)
*/
public void setConnectionFactory(ConnectionFactory connectionFactory) {
this.connectionFactory = connectionFactory;
}
/**
* @see AbstractMessageListenerContainer#setDestinationResolver(DestinationResolver)
*/
public void setDestinationResolver(DestinationResolver destinationResolver) {
this.destinationResolver = destinationResolver;
}
/**
* @see AbstractMessageListenerContainer#setMessageConverter(MessageConverter)
*/
public void setMessageConverter(MessageConverter messageConverter) {
this.messageConverter = messageConverter;
}
/**
* @since 5.2.8
* @see AbstractMessageListenerContainer#setExceptionListener(ExceptionListener)
*/
public void setExceptionListener(ExceptionListener exceptionListener) {
this.exceptionListener = exceptionListener;
}
/**
* @see AbstractMessageListenerContainer#setErrorHandler(ErrorHandler)
*/
public void setErrorHandler(ErrorHandler errorHandler) {
this.errorHandler = errorHandler;
}
/**
* @see AbstractMessageListenerContainer#setSessionTransacted(boolean)
*/
public void setSessionTransacted(Boolean sessionTransacted) {
this.sessionTransacted = sessionTransacted;
}
/**
* @see AbstractMessageListenerContainer#setSessionAcknowledgeMode(int)
*/
public void setSessionAcknowledgeMode(Integer sessionAcknowledgeMode) {
this.sessionAcknowledgeMode = sessionAcknowledgeMode;
}
/**
* @since 6.2.6
* @see AbstractMessageListenerContainer#setAcknowledgeAfterListener(boolean)
*/
public void setAcknowledgeAfterListener(Boolean acknowledgeAfterListener) {
this.acknowledgeAfterListener = acknowledgeAfterListener;
}
/**
* @see AbstractMessageListenerContainer#setPubSubDomain(boolean)
*/
public void setPubSubDomain(Boolean pubSubDomain) {
this.pubSubDomain = pubSubDomain;
}
/**
* @see AbstractMessageListenerContainer#setReplyPubSubDomain(boolean)
*/
public void setReplyPubSubDomain(Boolean replyPubSubDomain) {
this.replyPubSubDomain = replyPubSubDomain;
}
/**
* @see AbstractMessageListenerContainer#setReplyQosSettings(QosSettings)
*/
public void setReplyQosSettings(QosSettings replyQosSettings) {
this.replyQosSettings = replyQosSettings;
}
/**
* @see AbstractMessageListenerContainer#setSubscriptionDurable(boolean)
*/
public void setSubscriptionDurable(Boolean subscriptionDurable) {
this.subscriptionDurable = subscriptionDurable;
}
/**
* @see AbstractMessageListenerContainer#setSubscriptionShared(boolean)
*/
public void setSubscriptionShared(Boolean subscriptionShared) {
this.subscriptionShared = subscriptionShared;
}
/**
* @see AbstractMessageListenerContainer#setClientId(String)
*/
public void setClientId(String clientId) {
this.clientId = clientId;
}
/**
* @see AbstractMessageListenerContainer#setPhase(int)
*/
public void setPhase(int phase) {
this.phase = phase;
}
/**
* @see AbstractMessageListenerContainer#setAutoStartup(boolean)
*/
public void setAutoStartup(boolean autoStartup) {
this.autoStartup = autoStartup;
}
/**
* Set the {@link ObservationRegistry} to be used for recording
* {@linkplain io.micrometer.jakarta9.instrument.jms.JmsObservationDocumentation#JMS_MESSAGE_PROCESS
* JMS message processing observations}.
* <p>Defaults to no-op observations if the registry is not set.
* @since 6.1
* @see AbstractMessageListenerContainer#setObservationRegistry(ObservationRegistry)
*/
public void setObservationRegistry(ObservationRegistry observationRegistry) {
this.observationRegistry = observationRegistry;
}
@Override
public C createListenerContainer(JmsListenerEndpoint endpoint) {
C instance = createContainerInstance();
if (this.connectionFactory != null) {
instance.setConnectionFactory(this.connectionFactory);
}
if (this.destinationResolver != null) {
instance.setDestinationResolver(this.destinationResolver);
}
if (this.messageConverter != null) {
instance.setMessageConverter(this.messageConverter);
}
if (this.exceptionListener != null) {
instance.setExceptionListener(this.exceptionListener);
}
if (this.errorHandler != null) {
instance.setErrorHandler(this.errorHandler);
}
if (this.sessionTransacted != null) {
instance.setSessionTransacted(this.sessionTransacted);
}
if (this.sessionAcknowledgeMode != null) {
instance.setSessionAcknowledgeMode(this.sessionAcknowledgeMode);
}
if (this.acknowledgeAfterListener != null) {
instance.setAcknowledgeAfterListener(this.acknowledgeAfterListener);
}
if (this.pubSubDomain != null) {
instance.setPubSubDomain(this.pubSubDomain);
}
if (this.replyPubSubDomain != null) {
instance.setReplyPubSubDomain(this.replyPubSubDomain);
}
if (this.replyQosSettings != null) {
instance.setReplyQosSettings(this.replyQosSettings);
}
if (this.subscriptionDurable != null) {
instance.setSubscriptionDurable(this.subscriptionDurable);
}
if (this.subscriptionShared != null) {
instance.setSubscriptionShared(this.subscriptionShared);
}
if (this.clientId != null) {
instance.setClientId(this.clientId);
}
if (this.phase != null) {
instance.setPhase(this.phase);
}
if (this.autoStartup != null) {
instance.setAutoStartup(this.autoStartup);
}
if (this.observationRegistry != null) {
instance.setObservationRegistry(this.observationRegistry);
}
initializeContainer(instance);
endpoint.setupListenerContainer(instance);
return instance;
}
/**
* Create an empty container instance.
*/
protected abstract C createContainerInstance();
/**
* Further initialize the specified container.
* <p>Subclasses can inherit from this method to apply extra
* configuration if necessary.
*/
protected void initializeContainer(C instance) {
}
}
| AbstractJmsListenerContainerFactory |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/cluster/pubsub/RedisClusterPubSubConnectionIntegrationTests.java | {
"start": 1975,
"end": 19090
} | class ____ extends TestSupport {
private final RedisClusterClient clusterClient;
private final RedisClusterClient clusterClientWithNoRedirects;
private final PubSubTestListener connectionListener = new PubSubTestListener();
private final PubSubTestListener nodeListener = new PubSubTestListener();
private StatefulRedisClusterConnection<String, String> connection;
private StatefulRedisClusterPubSubConnection<String, String> pubSubConnection;
private StatefulRedisClusterPubSubConnection<String, String> pubSubConnection2;
String shardChannel = "shard-channel";
String shardMessage = "shard msg!";
String shardTestChannel = "shard-test-channel";
@Inject
RedisClusterPubSubConnectionIntegrationTests(RedisClusterClient clusterClient, RedisClusterClient clusterClient2) {
this.clusterClient = clusterClient;
ClusterClientOptions.Builder builder = ClusterClientOptions.builder().maxRedirects(0);
clusterClient2.setOptions(builder.build());
this.clusterClientWithNoRedirects = clusterClient2;
}
@BeforeEach
void openPubSubConnection() {
connection = clusterClient.connect();
pubSubConnection = clusterClient.connectPubSub();
pubSubConnection2 = clusterClient.connectPubSub();
pubSubConnection.addListener(connectionListener);
}
@AfterEach
void closePubSubConnection() {
connection.close();
pubSubConnection.close();
pubSubConnection2.close();
connectionListener.clear();
pubSubConnection.removeListener(connectionListener);
}
@Test
void testRegularClientPubSubChannels() {
String nodeId = pubSubConnection.sync().clusterMyId();
RedisClusterNode otherNode = getOtherThan(nodeId);
pubSubConnection.sync().subscribe(key);
List<String> channelsOnSubscribedNode = connection.getConnection(nodeId).sync().pubsubChannels();
assertThat(channelsOnSubscribedNode).hasSize(1);
List<String> channelsOnOtherNode = connection.getConnection(otherNode.getNodeId()).sync().pubsubChannels();
assertThat(channelsOnOtherNode).isEmpty();
}
@Test
@EnabledOnCommand("SSUBSCRIBE")
void testRegularClientPubSubShardChannels() {
pubSubConnection.sync().ssubscribe(shardChannel);
Integer clusterKeyslot = connection.sync().clusterKeyslot(shardChannel).intValue();
RedisCommands<String, String> rightSlot = connection.sync().nodes(node -> node.getSlots().contains(clusterKeyslot))
.commands(0);
RedisCommands<String, String> wrongSlot = connection.sync().nodes(node -> !node.getSlots().contains(clusterKeyslot))
.commands(0);
List<String> channelsOnSubscribedNode = rightSlot.pubsubShardChannels();
assertThat(channelsOnSubscribedNode).hasSize(1);
List<String> channelsOnOtherNode = wrongSlot.pubsubShardChannels();
assertThat(channelsOnOtherNode).isEmpty();
}
@Test
@EnabledOnCommand("SSUBSCRIBE")
void subscribeToShardChannel() {
pubSubConnection.sync().ssubscribe(shardChannel);
Wait.untilEquals(1L, connectionListener.getShardCounts()::poll).waitOrTimeout();
Wait.untilEquals(shardChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
}
@Test
@EnabledOnCommand("SSUBSCRIBE")
void subscribeToShardChannelViaReplica() {
int clusterKeyslot = connection.sync().clusterKeyslot(shardChannel).intValue();
String thisNode = connection.getPartitions().getPartitionBySlot(clusterKeyslot).getNodeId();
RedisPubSubAsyncCommands<String, String> replica = pubSubConnection.async()
.nodes(node -> thisNode.equals(node.getSlaveOf())).commands(0);
replica.ssubscribe(shardChannel);
Wait.untilEquals(shardChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
}
@Test
@EnabledOnCommand("SSUBSCRIBE")
void publishToShardChannel() throws Exception {
pubSubConnection.addListener(connectionListener);
pubSubConnection.async().ssubscribe(shardChannel);
Wait.untilEquals(shardChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
pubSubConnection.async().spublish(shardChannel, shardMessage);
Wait.untilEquals(shardChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
Wait.untilEquals(shardMessage, connectionListener.getMessages()::poll).waitOrTimeout();
}
@Test
@EnabledOnCommand("SSUBSCRIBE")
void publishToShardChannelViaDifferentEndpoints() throws Exception {
pubSubConnection.addListener(connectionListener);
pubSubConnection.async().ssubscribe(shardChannel);
Wait.untilEquals(shardChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
pubSubConnection.async().ssubscribe(shardTestChannel);
Wait.untilEquals(shardTestChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
pubSubConnection.async().spublish(shardChannel, shardMessage);
Wait.untilEquals(shardChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
Wait.untilEquals(shardMessage, connectionListener.getMessages()::poll).waitOrTimeout();
pubSubConnection.async().spublish(shardTestChannel, shardMessage);
Wait.untilEquals(shardTestChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
Wait.untilEquals(shardMessage, connectionListener.getMessages()::poll).waitOrTimeout();
}
@Test
@EnabledOnCommand("SSUBSCRIBE")
void publishToShardChannelViaNewClient() throws Exception {
pubSubConnection.addListener(connectionListener);
pubSubConnection.async().ssubscribe(shardChannel);
StatefulRedisClusterPubSubConnection<String, String> newPubsub = clusterClientWithNoRedirects.connectPubSub();
newPubsub.async().spublish(shardChannel, shardMessage);
Wait.untilEquals(shardChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
Wait.untilEquals(shardMessage, connectionListener.getMessages()::poll).waitOrTimeout();
newPubsub.close();
}
@Test
@EnabledOnCommand("SSUBSCRIBE")
void publishToShardChannelViaNewClientWithNoRedirects() throws Exception {
pubSubConnection.addListener(connectionListener);
pubSubConnection.async().ssubscribe(shardChannel);
Wait.untilEquals(shardChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
pubSubConnection.async().ssubscribe(shardTestChannel);
Wait.untilEquals(shardTestChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
RedisClusterPubSubAsyncCommands<String, String> cmd = clusterClientWithNoRedirects.connectPubSub().async();
cmd.spublish(shardChannel, shardMessage);
Wait.untilEquals(shardChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
Wait.untilEquals(shardMessage, connectionListener.getMessages()::poll).waitOrTimeout();
cmd.spublish(shardTestChannel, shardMessage);
Wait.untilEquals(shardTestChannel, connectionListener.getShardChannels()::poll).waitOrTimeout();
Wait.untilEquals(shardMessage, connectionListener.getMessages()::poll).waitOrTimeout();
cmd.getStatefulConnection().close();
}
@Test
@EnabledOnCommand("SSUBSCRIBE")
void unubscribeFromShardChannel() {
pubSubConnection.sync().ssubscribe(shardChannel);
pubSubConnection.sync().spublish(shardChannel, "msg1");
pubSubConnection.sync().sunsubscribe(shardChannel);
pubSubConnection.sync().spublish(shardChannel, "msg2");
pubSubConnection.sync().ssubscribe(shardChannel);
pubSubConnection.sync().spublish(shardChannel, "msg3");
Wait.untilEquals("msg1", connectionListener.getMessages()::poll).waitOrTimeout();
Wait.untilEquals("msg3", connectionListener.getMessages()::poll).waitOrTimeout();
}
@Test
void myIdWorksAfterDisconnect() throws InterruptedException {
BlockingQueue<CommandFailedEvent> failedEvents = new LinkedBlockingQueue<CommandFailedEvent>();
CommandListener listener = new CommandListener() {
@Override
public void commandFailed(CommandFailedEvent event) {
failedEvents.add(event);
}
};
clusterClient.addListener(listener);
StatefulRedisClusterPubSubConnection<String, String> pubsub = clusterClient.connectPubSub();
pubsub.sync().subscribe("foo");
pubsub.async().quit();
Thread.sleep(100);
Wait.untilTrue(pubsub::isOpen).waitOrTimeout();
pubsub.close();
clusterClient.removeListener(listener);
assertThat(failedEvents).isEmpty();
}
@Test
void testRegularClientPublish() throws Exception {
String nodeId = pubSubConnection.sync().clusterMyId();
RedisClusterNode otherNode = getOtherThan(nodeId);
pubSubConnection.sync().subscribe(key);
connection.getConnection(nodeId).sync().publish(key, value);
assertThat(connectionListener.getMessages().take()).isEqualTo(value);
connection.getConnection(otherNode.getNodeId()).sync().publish(key, value);
assertThat(connectionListener.getMessages().take()).isEqualTo(value);
}
@Test
void testPubSubClientPublish() throws Exception {
String nodeId = pubSubConnection.sync().clusterMyId();
pubSubConnection.sync().subscribe(key);
assertThat(pubSubConnection2.sync().clusterMyId()).isEqualTo(nodeId);
pubSubConnection2.sync().publish(key, value);
assertThat(connectionListener.getMessages().take()).isEqualTo(value);
}
@Test
void testConnectToLeastClientsNode() {
clusterClient.reloadPartitions();
String nodeId = pubSubConnection.sync().clusterMyId();
StatefulRedisPubSubConnection<String, String> connectionAfterPartitionReload = clusterClient.connectPubSub();
String newConnectionNodeId = connectionAfterPartitionReload.sync().clusterMyId();
connectionAfterPartitionReload.close();
assertThat(nodeId).isNotEqualTo(newConnectionNodeId);
}
@Test
void testRegularClientPubSubPublish() throws Exception {
String nodeId = pubSubConnection.sync().clusterMyId();
RedisClusterNode otherNode = getOtherThan(nodeId);
pubSubConnection.sync().subscribe(key);
List<String> channelsOnSubscribedNode = connection.getConnection(nodeId).sync().pubsubChannels();
assertThat(channelsOnSubscribedNode).hasSize(1);
RedisCommands<String, String> otherNodeConnection = connection.getConnection(otherNode.getNodeId()).sync();
otherNodeConnection.publish(key, value);
assertThat(connectionListener.getChannels().take()).isEqualTo(key);
}
@Test
void testGetConnectionAsyncByNodeId() {
RedisClusterNode partition = pubSubConnection.getPartitions().getPartition(0);
StatefulRedisPubSubConnection<String, String> node = TestFutures
.getOrTimeout(pubSubConnection.getConnectionAsync(partition.getNodeId()));
assertThat(node.sync().ping()).isEqualTo("PONG");
}
@Test
void testGetConnectionAsyncByHostAndPort() {
RedisClusterNode partition = pubSubConnection.getPartitions().getPartition(0);
RedisURI uri = partition.getUri();
StatefulRedisPubSubConnection<String, String> node = TestFutures
.getOrTimeout(pubSubConnection.getConnectionAsync(uri.getHost(), uri.getPort()));
assertThat(node.sync().ping()).isEqualTo("PONG");
}
@Test
void testNodeIdSubscription() throws Exception {
RedisClusterNode partition = pubSubConnection.getPartitions().getPartition(0);
StatefulRedisPubSubConnection<String, String> node = pubSubConnection.getConnection(partition.getNodeId());
node.addListener(nodeListener);
node.sync().subscribe("channel");
pubSubConnection2.sync().publish("channel", "message");
assertThat(nodeListener.getMessages().take()).isEqualTo("message");
assertThat(connectionListener.getMessages().poll()).isNull();
}
@Test
void testNodeMessagePropagationSubscription() throws Exception {
RedisClusterNode partition = pubSubConnection.getPartitions().getPartition(0);
pubSubConnection.setNodeMessagePropagation(true);
StatefulRedisPubSubConnection<String, String> node = pubSubConnection.getConnection(partition.getNodeId());
node.sync().subscribe("channel");
pubSubConnection2.sync().publish("channel", "message");
assertThat(connectionListener.getMessages().take()).isEqualTo("message");
}
@Test
void testNodeHostAndPortMessagePropagationSubscription() throws Exception {
RedisClusterNode partition = pubSubConnection.getPartitions().getPartition(0);
pubSubConnection.setNodeMessagePropagation(true);
RedisURI uri = partition.getUri();
StatefulRedisPubSubConnection<String, String> node = pubSubConnection.getConnection(uri.getHost(), uri.getPort());
node.sync().subscribe("channel");
pubSubConnection2.sync().publish("channel", "message");
assertThat(connectionListener.getMessages().take()).isEqualTo("message");
}
@Test
void testAsyncSubscription() throws Exception {
pubSubConnection.setNodeMessagePropagation(true);
PubSubAsyncNodeSelection<String, String> masters = pubSubConnection.async().masters();
NodeSelectionPubSubAsyncCommands<String, String> commands = masters.commands();
TestFutures.awaitOrTimeout(commands.psubscribe("chann*"));
pubSubConnection2.sync().publish("channel", "message");
assertThat(masters.size()).isEqualTo(2);
assertThat(connectionListener.getMessages().take()).isEqualTo("message");
assertThat(connectionListener.getMessages().take()).isEqualTo("message");
}
@Test
void testSyncSubscription() throws Exception {
pubSubConnection.setNodeMessagePropagation(true);
PubSubNodeSelection<String, String> masters = pubSubConnection.sync().masters();
NodeSelectionPubSubCommands<String, String> commands = masters.commands();
commands.psubscribe("chann*");
pubSubConnection2.sync().publish("channel", "message");
assertThat(masters.size()).isEqualTo(2);
assertThat(connectionListener.getMessages().take()).isEqualTo("message");
assertThat(connectionListener.getMessages().take()).isEqualTo("message");
}
@Test
void testReactiveSubscription() throws Exception {
pubSubConnection.setNodeMessagePropagation(true);
PubSubReactiveNodeSelection<String, String> masters = pubSubConnection.reactive().masters();
NodeSelectionPubSubReactiveCommands<String, String> commands = masters.commands();
commands.psubscribe("chann*").flux().then().block();
pubSubConnection2.sync().publish("channel", "message");
assertThat(masters.size()).isEqualTo(2);
assertThat(connectionListener.getMessages().take()).isEqualTo("message");
assertThat(connectionListener.getMessages().take()).isEqualTo("message");
assertThat(connectionListener.getMessages().poll()).isNull();
}
@Test
void testClusterListener() throws Exception {
BlockingQueue<RedisClusterNode> nodes = new LinkedBlockingQueue<>();
pubSubConnection.setNodeMessagePropagation(true);
pubSubConnection.addListener(new RedisClusterPubSubAdapter<String, String>() {
@Override
public void message(RedisClusterNode node, String pattern, String channel, String message) {
nodes.add(node);
}
});
PubSubNodeSelection<String, String> masters = pubSubConnection.sync().masters();
NodeSelectionPubSubCommands<String, String> commands = masters.commands();
commands.psubscribe("chann*");
pubSubConnection2.sync().publish("channel", "message");
assertThat(masters.size()).isEqualTo(2);
assertThat(connectionListener.getMessages().take()).isEqualTo("message");
assertThat(connectionListener.getMessages().take()).isEqualTo("message");
assertThat(connectionListener.getMessages().poll()).isNull();
assertThat(nodes.take()).isNotNull();
assertThat(nodes.take()).isNotNull();
assertThat(nodes.poll()).isNull();
}
private RedisClusterNode getOtherThan(String nodeId) {
for (RedisClusterNode redisClusterNode : clusterClient.getPartitions()) {
if (redisClusterNode.getNodeId().equals(nodeId)) {
continue;
}
return redisClusterNode;
}
throw new IllegalStateException("No other nodes than " + nodeId + " available");
}
}
| RedisClusterPubSubConnectionIntegrationTests |
java | grpc__grpc-java | alts/src/main/java/io/grpc/alts/AltsChannelBuilder.java | {
"start": 1111,
"end": 1286
} | class ____ up a secure and authenticated
* communication between two cloud VMs using ALTS.
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/4151")
public final | sets |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/ModeStatsModeEmulation.java | {
"start": 603,
"end": 2324
} | class ____ extends InverseDistributionFunction {
public static final String FUNCTION_NAME = "mode";
public ModeStatsModeEmulation(TypeConfiguration typeConfiguration) {
super(
FUNCTION_NAME,
null,
typeConfiguration
);
}
@Override
public void render(
SqlAppender sqlAppender,
List<? extends SqlAstNode> sqlAstArguments,
Predicate filter,
List<SortSpecification> withinGroup,
ReturnableType<?> returnType,
SqlAstTranslator<?> translator) {
final boolean caseWrapper = filter != null && !filterClauseSupported( translator );
sqlAppender.appendSql( "stats_mode(" );
if ( withinGroup == null || withinGroup.size() != 1 ) {
throw new IllegalArgumentException( "MODE function requires a WITHIN GROUP clause with exactly one order by item" );
}
if ( caseWrapper ) {
translator.getCurrentClauseStack().push( Clause.WHERE );
sqlAppender.appendSql( "case when " );
filter.accept( translator );
translator.getCurrentClauseStack().pop();
sqlAppender.appendSql( " then " );
translator.getCurrentClauseStack().push( Clause.WITHIN_GROUP );
withinGroup.get( 0 ).accept( translator );
sqlAppender.appendSql( " else null end)" );
translator.getCurrentClauseStack().pop();
}
else {
translator.getCurrentClauseStack().push( Clause.WITHIN_GROUP );
withinGroup.get( 0 ).accept( translator );
translator.getCurrentClauseStack().pop();
sqlAppender.appendSql( ')' );
if ( filter != null ) {
translator.getCurrentClauseStack().push( Clause.WHERE );
sqlAppender.appendSql( " filter (where " );
filter.accept( translator );
sqlAppender.appendSql( ')' );
translator.getCurrentClauseStack().pop();
}
}
}
}
| ModeStatsModeEmulation |
java | apache__camel | test-infra/camel-test-infra-elasticsearch/src/main/java/org/apache/camel/test/infra/elasticsearch/services/ElasticSearchLocalContainerInfraService.java | {
"start": 3124,
"end": 6094
} | class ____ extends ElasticsearchContainer {
public TestInfraElasticsearchContainer(boolean fixedPort) {
super(DockerImageName.parse(imageName)
.asCompatibleSubstituteFor("docker.elastic.co/elasticsearch/elasticsearch"));
withPassword(PASSWORD);
if (fixedPort) {
addFixedExposedPort(ELASTIC_SEARCH_PORT, ELASTIC_SEARCH_PORT);
} else {
withExposedPorts(ELASTIC_SEARCH_PORT);
}
setWaitStrategy(
new LogMessageWaitStrategy()
.withRegEx(".*(\"message\":\\s?\"started[\\s?|\"].*|] started\n$)")
.withStartupTimeout(Duration.ofSeconds(90)));
}
}
return new TestInfraElasticsearchContainer(ContainerEnvironmentUtil.isFixedPort(this.getClass()));
}
@Override
public int getPort() {
return container.getMappedPort(ELASTIC_SEARCH_PORT);
}
@Override
public String getElasticSearchHost() {
return container.getHost();
}
@Override
public String getHttpHostAddress() {
return container.getHttpHostAddress();
}
@Override
public void registerProperties() {
System.setProperty(ElasticSearchProperties.ELASTIC_SEARCH_HOST, getElasticSearchHost());
System.setProperty(ElasticSearchProperties.ELASTIC_SEARCH_PORT, String.valueOf(getPort()));
getContainer().caCertAsBytes().ifPresent(content -> {
try {
certPath = Files.createTempFile("http_ca", ".crt");
Files.write(certPath, content);
} catch (IOException e) {
throw new RuntimeException(e);
}
sslContext = getContainer().createSslContextFromCa();
});
}
@Override
public void initialize() {
LOG.info("Trying to start the ElasticSearch container");
ContainerEnvironmentUtil.configureContainerStartup(container, ElasticSearchProperties.ELASTIC_SEARCH_CONTAINER_STARTUP,
2);
container.start();
registerProperties();
LOG.info("ElasticSearch instance running at {}", getHttpHostAddress());
}
@Override
public void shutdown() {
LOG.info("Stopping the ElasticSearch container");
container.stop();
}
@Override
public ElasticsearchContainer getContainer() {
return container;
}
@Override
public Optional<String> getCertificatePath() {
return Optional.ofNullable(certPath).map(Objects::toString);
}
@Override
public Optional<SSLContext> getSslContext() {
return Optional.ofNullable(sslContext);
}
@Override
public String getUsername() {
return USER_NAME;
}
@Override
public String getPassword() {
return PASSWORD;
}
}
| TestInfraElasticsearchContainer |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/DefaultExtJSONParserTest.java | {
"start": 16949,
"end": 17610
} | class ____ {
private String name;
public String getName() {
return name;
}
public void setName(String name) {
throw new UnsupportedOperationException();
}
}
public void test_error2() throws Exception {
{
Exception error = null;
try {
String text = "{}";
DefaultJSONParser parser = new DefaultJSONParser(text);
parser.parseArray(User.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
}
public static | ErrorObject2 |
java | apache__camel | dsl/camel-jbang/camel-jbang-core/src/main/java/org/apache/camel/dsl/jbang/core/commands/SBOMGenerator.java | {
"start": 1663,
"end": 9363
} | class ____ extends Export {
protected static final String EXPORT_DIR = CommandLineHelper.CAMEL_JBANG_WORK_DIR + "/export";
protected static final String CYCLONEDX_FORMAT = "cyclonedx";
protected static final String SPDX_FORMAT = "spdx";
protected static final String SBOM_JSON_FORMAT = "json";
protected static final String SBOM_XML_FORMAT = "xml";
@CommandLine.Option(names = { "--output-directory" }, description = "Directory where the SBOM will be saved",
defaultValue = ".")
protected String outputDirectory = ".";
@CommandLine.Option(names = { "--output-name" }, description = "Output name of the SBOM file",
defaultValue = "sbom")
protected String outputName = "sbom";
@CommandLine.Option(names = { "--cyclonedx-plugin-version" }, description = "The CycloneDX Maven Plugin version",
defaultValue = "2.9.1")
protected String cyclonedxPluginVersion = "2.9.1";
@CommandLine.Option(names = { "--spdx-plugin-version" }, description = "The SPDX Maven Plugin version",
defaultValue = "0.7.4")
protected String spdxPluginVersion = "0.7.4";
@CommandLine.Option(names = { "--sbom-format" }, description = "The SBOM format, possible values are cyclonedx or spdx",
defaultValue = CYCLONEDX_FORMAT)
protected String sbomFormat = CYCLONEDX_FORMAT;
@CommandLine.Option(names = { "--sbom-output-format" },
description = "The SBOM output format, possible values are json or xml",
defaultValue = SBOM_JSON_FORMAT)
protected String sbomOutputFormat = SBOM_JSON_FORMAT;
public SBOMGenerator(CamelJBangMain main) {
super(main);
}
@Override
public Integer doCall() throws Exception {
this.quiet = true; // be quiet and generate from fresh data to ensure the output is up-to-date
return super.doCall();
}
@Override
protected Integer export() throws Exception {
exportBaseDir = Path.of(".");
// special if user type: camel run . or camel run dirName
if (files != null && files.size() == 1) {
String name = FileUtil.stripTrailingSeparator(files.get(0));
Path first = Path.of(name);
if (Files.isDirectory(first)) {
exportBaseDir = first;
RunHelper.dirToFiles(name, files);
}
}
Integer answer = doExport();
if (answer == 0) {
Path buildDir = Paths.get(EXPORT_DIR);
String mvnProgramCall;
if (FileUtil.isWindows()) {
mvnProgramCall = "cmd /c mvn";
} else {
mvnProgramCall = "mvn";
}
boolean done;
if (sbomFormat.equalsIgnoreCase(CYCLONEDX_FORMAT)) {
String outputDirectoryParameter = "-DoutputDirectory=";
if (Paths.get(outputDirectory).isAbsolute()) {
outputDirectoryParameter += outputDirectory;
} else {
outputDirectoryParameter += "../../" + outputDirectory;
}
ProcessBuilder pb = new ProcessBuilder(
mvnProgramCall,
"org.cyclonedx:cyclonedx-maven-plugin:" + cyclonedxPluginVersion + ":makeAggregateBom",
outputDirectoryParameter,
"-DoutputName=" + outputName,
"-DoutputFormat=" + sbomOutputFormat);
pb.directory(buildDir.toFile());
Process p = pb.start();
done = p.waitFor(60, TimeUnit.SECONDS);
if (!done) {
answer = 1;
}
if (p.exitValue() != 0) {
answer = p.exitValue();
}
} else if (sbomFormat.equalsIgnoreCase(SPDX_FORMAT)) {
String outputDirectoryParameter = null;
String outputFormat = null;
if (Paths.get(outputDirectory).isAbsolute()) {
outputDirectoryParameter = outputDirectory;
} else {
outputDirectoryParameter = "../../" + outputDirectory;
}
if (sbomOutputFormat.equalsIgnoreCase(SBOM_JSON_FORMAT)) {
outputFormat = "JSON";
} else if (sbomOutputFormat.equalsIgnoreCase(SBOM_XML_FORMAT)) {
outputFormat = "RDF/XML";
}
ProcessBuilder pb = new ProcessBuilder(
mvnProgramCall,
"org.spdx:spdx-maven-plugin:" + spdxPluginVersion + ":createSPDX",
"-DspdxFileName=" + Paths.get(outputDirectoryParameter, outputName + "." + sbomOutputFormat),
"-DoutputFormat=" + outputFormat);
pb.directory(buildDir.toFile());
Process p = pb.start();
done = p.waitFor(60, TimeUnit.SECONDS);
if (!done) {
answer = 1;
}
if (p.exitValue() != 0) {
answer = p.exitValue();
}
}
// cleanup dir after complete
org.apache.camel.dsl.jbang.core.common.PathUtils.deleteDirectory(buildDir);
}
return answer;
}
protected Integer doExport() throws Exception {
// read runtime and gav from properties if not configured
Path profile = exportBaseDir.resolve("application.properties");
if (Files.exists(profile)) {
Properties prop = new CamelCaseOrderedProperties();
RuntimeUtil.loadProperties(prop, profile);
if (this.runtime == null && prop.containsKey(CamelJBangConstants.RUNTIME)) {
this.runtime = RuntimeType.fromValue(prop.getProperty(CamelJBangConstants.RUNTIME));
}
if (this.gav == null) {
this.gav = prop.getProperty(GAV);
}
// allow configuring versions from profile
this.javaVersion = prop.getProperty(JAVA_VERSION, this.javaVersion);
this.camelVersion = prop.getProperty(CAMEL_VERSION, this.camelVersion);
this.kameletsVersion = prop.getProperty(KAMELETS_VERSION, this.kameletsVersion);
this.localKameletDir = prop.getProperty(LOCAL_KAMELET_DIR, this.localKameletDir);
this.quarkusGroupId = prop.getProperty(QUARKUS_GROUP_ID, this.quarkusGroupId);
this.quarkusArtifactId = prop.getProperty(QUARKUS_ARTIFACT_ID, this.quarkusArtifactId);
this.quarkusVersion = prop.getProperty(QUARKUS_VERSION, this.quarkusVersion);
this.springBootVersion = prop.getProperty(SPRING_BOOT_VERSION, this.springBootVersion);
}
// use temporary export dir
exportDir = EXPORT_DIR;
if (gav == null) {
gav = "org.example.project:camel-jbang-export:1.0";
}
if (runtime == null) {
runtime = RuntimeType.main;
}
switch (runtime) {
case springBoot -> {
return export(exportBaseDir, new ExportSpringBoot(getMain()));
}
case quarkus -> {
return export(exportBaseDir, new ExportQuarkus(getMain()));
}
case main -> {
return export(exportBaseDir, new ExportCamelMain(getMain()));
}
default -> {
printer().printErr("Unknown runtime: " + runtime);
return 1;
}
}
}
}
| SBOMGenerator |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/QueryableBuiltInRolesProviderFactory.java | {
"start": 458,
"end": 634
} | interface ____ {
QueryableBuiltInRoles.Provider createProvider(ReservedRolesStore reservedRolesStore, FileRolesStore fileRolesStore);
| QueryableBuiltInRolesProviderFactory |
java | apache__camel | components/camel-aws/camel-aws2-sqs/src/test/java/org/apache/camel/component/aws2/sqs/SqsBatchConsumerConcurrentConsumersIT.java | {
"start": 1222,
"end": 2610
} | class ____ extends CamelTestSupport {
@EndpointInject("mock:result")
private MockEndpoint mock;
@Test
public void receiveBatch() throws Exception {
mock.expectedMessageCount(6);
MockEndpoint.assertIsSatisfied(context, 10, TimeUnit.SECONDS);
}
@BindToRegistry("amazonSQSClient")
public AmazonSQSClientMock addClient() {
AmazonSQSClientMock clientMock = new AmazonSQSClientMock();
// add 6 messages, one more than we will poll
for (int counter = 0; counter < 6; counter++) {
Message.Builder message = Message.builder();
message.body("Message " + counter);
message.md5OfBody("6a1559560f67c5e7a7d5d838bf0272ee" + counter);
message.messageId("f6fb6f99-5eb2-4be4-9b15-144774141458" + counter);
message.receiptHandle("0NNAq8PwvXsyZkR6yu4nQ07FGxNmOBWi5" + counter);
clientMock.addMessage(message.build());
}
return clientMock;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("aws2-sqs://MyQueue?amazonSQSClient=#amazonSQSClient&delay=5000&maxMessagesPerPoll=5&concurrentConsumers=2")
.to("mock:result");
}
};
}
}
| SqsBatchConsumerConcurrentConsumersIT |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java | {
"start": 12295,
"end": 22334
} | enum ____ {
XPACK,
ILM,
SLM,
ROLLUPS,
CCR,
SHUTDOWN,
LEGACY_TEMPLATES,
SEARCHABLE_SNAPSHOTS
}
private static EnumSet<ProductFeature> availableFeatures;
private static Set<String> nodesVersions;
protected static TestFeatureService testFeatureService = ALL_FEATURES;
protected static Set<String> getCachedNodesVersions() {
assert nodesVersions != null;
return nodesVersions;
}
protected static Set<String> readVersionsFromNodesInfo(RestClient adminClient) throws IOException {
return getNodesInfo(adminClient).values().stream().map(nodeInfo -> nodeInfo.get("version").toString()).collect(Collectors.toSet());
}
protected static Map<String, Map<?, ?>> getNodesInfo(RestClient adminClient) throws IOException {
Map<?, ?> response = entityAsMap(adminClient.performRequest(new Request("GET", "_nodes/plugins")));
Map<?, ?> nodes = (Map<?, ?>) response.get("nodes");
return nodes.entrySet()
.stream()
.collect(Collectors.toUnmodifiableMap(entry -> entry.getKey().toString(), entry -> (Map<?, ?>) entry.getValue()));
}
/**
* Does the cluster being tested support the set of capabilities
* for specified path and method.
*/
protected static Optional<Boolean> clusterHasCapability(
String method,
String path,
Collection<String> parameters,
Collection<String> capabilities
) throws IOException {
return clusterHasCapability(adminClient, method, path, parameters, capabilities);
}
/**
* Does the cluster on the other side of {@code client} support the set
* of capabilities for specified path and method.
*/
protected static Optional<Boolean> clusterHasCapability(
RestClient client,
String method,
String path,
Collection<String> parameters,
Collection<String> capabilities
) throws IOException {
Request request = new Request("GET", "_capabilities");
request.addParameter("method", method);
request.addParameter("path", path);
if (parameters.isEmpty() == false) {
request.addParameter("parameters", String.join(",", parameters));
}
if (capabilities.isEmpty() == false) {
request.addParameter("capabilities", String.join(",", capabilities));
}
try {
Map<String, Object> response = entityAsMap(client.performRequest(request).getEntity());
return Optional.ofNullable((Boolean) response.get("supported"));
} catch (ResponseException responseException) {
if (responseException.getResponse().getStatusLine().getStatusCode() / 100 == 4) {
return Optional.empty(); // we don't know, the capabilities API is unsupported
}
throw responseException;
}
}
protected static boolean clusterHasFeature(String featureId) {
return testFeatureService.clusterHasFeature(featureId, false);
}
protected static boolean clusterHasFeature(NodeFeature feature) {
return testFeatureService.clusterHasFeature(feature.id(), false);
}
protected static boolean testFeatureServiceInitialized() {
return testFeatureService != ALL_FEATURES;
}
/**
* Whether the old cluster version is not of the released versions, but a detached build.
* In that case the Git ref has to be specified via {@code tests.bwc.refspec.main} system property.
*/
protected static boolean isOldClusterDetachedVersion() {
return System.getProperty("tests.bwc.refspec.main") != null;
}
@BeforeClass
public static void initializeProjectIds() {
// The active project-id is slightly longer, and has a fixed prefix so that it's easier to pick in error messages etc.
activeProject = "active00" + randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
extraProjects = randomSet(1, 3, () -> randomAlphaOfLength(12).toLowerCase(Locale.ROOT));
multiProjectEnabled = Booleans.parseBoolean(System.getProperty("tests.multi_project.enabled", "false"));
}
@Before
public void initClient() throws IOException {
if (client == null) {
assert adminClient == null;
assert clusterHosts == null;
assert availableFeatures == null;
assert nodesVersions == null;
assert testFeatureServiceInitialized() == false;
clusterHosts = parseClusterHosts(getTestRestCluster());
logger.info("initializing REST clients against {}", clusterHosts);
// We add the project ID to the client settings afterward because a lot of subclasses don't call super.restClientSettings(),
// meaning the project ID would be removed from the settings.
var clientSettings = addProjectIdToSettings(restClientSettings());
var adminSettings = restAdminSettings();
var cleanupSettings = cleanupClientSettings();
var hosts = clusterHosts.toArray(new HttpHost[0]);
client = buildClient(clientSettings, hosts);
adminClient = clientSettings.equals(adminSettings) ? client : buildClient(adminSettings, hosts);
cleanupClient = adminSettings.equals(cleanupSettings) ? adminClient : buildClient(cleanupSettings, hosts);
availableFeatures = EnumSet.of(ProductFeature.LEGACY_TEMPLATES);
Set<String> versions = new HashSet<>();
boolean serverless = false;
for (Map<?, ?> nodeInfo : getNodesInfo(adminClient).values()) {
var nodeVersion = nodeInfo.get("version").toString();
versions.add(nodeVersion);
for (Object module : (List<?>) nodeInfo.get("modules")) {
Map<?, ?> moduleInfo = (Map<?, ?>) module;
final String moduleName = moduleInfo.get("name").toString();
if (moduleName.startsWith("x-pack")) {
availableFeatures.add(ProductFeature.XPACK);
}
if (moduleName.equals("x-pack-ilm")) {
availableFeatures.add(ProductFeature.ILM);
availableFeatures.add(ProductFeature.SLM);
}
if (moduleName.equals("x-pack-rollup")) {
availableFeatures.add(ProductFeature.ROLLUPS);
}
if (moduleName.equals("x-pack-ccr")) {
availableFeatures.add(ProductFeature.CCR);
}
if (moduleName.equals("x-pack-shutdown")) {
availableFeatures.add(ProductFeature.SHUTDOWN);
}
if (moduleName.equals("searchable-snapshots")) {
availableFeatures.add(ProductFeature.SEARCHABLE_SNAPSHOTS);
}
if (moduleName.startsWith("serverless-")) {
serverless = true;
}
}
if (serverless) {
availableFeatures.removeAll(
List.of(
ProductFeature.ILM,
ProductFeature.SLM,
ProductFeature.ROLLUPS,
ProductFeature.CCR,
ProductFeature.LEGACY_TEMPLATES
)
);
}
}
nodesVersions = Collections.unmodifiableSet(versions);
testFeatureService = createTestFeatureService(getClusterStateFeatures(adminClient), fromSemanticVersions(nodesVersions));
configureProjects();
}
assert testFeatureServiceInitialized();
assert client != null;
assert adminClient != null;
assert clusterHosts != null;
assert availableFeatures != null;
assert nodesVersions != null;
}
protected final TestFeatureService createTestFeatureService(
Map<String, Set<String>> clusterStateFeatures,
VersionFeaturesPredicate versionFeaturesPredicate
) {
return new ESRestTestFeatureService(versionFeaturesPredicate, clusterStateFeatures.values());
}
protected static boolean has(ProductFeature feature) {
return availableFeatures.contains(feature);
}
protected List<HttpHost> parseClusterHosts(String hostsString) {
String[] stringUrls = hostsString.split(",");
List<HttpHost> hosts = new ArrayList<>(stringUrls.length);
for (String stringUrl : stringUrls) {
int portSeparator = stringUrl.lastIndexOf(':');
if (portSeparator < 0) {
throw new IllegalArgumentException("Illegal cluster url [" + stringUrl + "]");
}
String host = stringUrl.substring(0, portSeparator);
int port = Integer.valueOf(stringUrl.substring(portSeparator + 1));
hosts.add(buildHttpHost(host, port));
}
return unmodifiableList(hosts);
}
protected String getTestRestCluster() {
String cluster = System.getProperty("tests.rest.cluster");
if (cluster == null) {
throw new RuntimeException(
"Must specify [tests.rest.cluster] system property with a comma delimited list of [host:port] "
+ "to which to send REST requests"
);
}
return cluster;
}
protected String getTestReadinessPorts() {
String ports = System.getProperty("tests.cluster.readiness");
if (ports == null) {
throw new RuntimeException(
"Must specify [tests.rest.cluster.readiness] system property with a comma delimited list "
+ "to which to send readiness requests"
);
}
return ports;
}
/**
* Helper | ProductFeature |
java | spring-projects__spring-boot | core/spring-boot-testcontainers/src/main/java/org/springframework/boot/testcontainers/lifecycle/TestcontainersLifecycleBeanPostProcessor.java | {
"start": 7007,
"end": 7064
} | enum ____ {
UNSTARTED, STARTING, STARTED
}
}
| Startables |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/admin/AdminClient.java | {
"start": 1173,
"end": 1764
} | class ____ implements Admin {
/**
* Create a new Admin with the given configuration.
*
* @param props The configuration.
* @return The new KafkaAdminClient.
*/
public static AdminClient create(Properties props) {
return (AdminClient) Admin.create(props);
}
/**
* Create a new Admin with the given configuration.
*
* @param conf The configuration.
* @return The new KafkaAdminClient.
*/
public static AdminClient create(Map<String, Object> conf) {
return (AdminClient) Admin.create(conf);
}
}
| AdminClient |
java | elastic__elasticsearch | modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java | {
"start": 17887,
"end": 19126
} | class ____ extends IngestTestPlugin {
@Override
public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) {
Map<String, Processor.Factory> processors = new HashMap<>();
processors.put(
"drop",
(factories, tag, description, config, projectId) -> new TestProcessor(tag, "drop", description, ingestDocument -> null)
);
processors.put("reroute", (factories, tag, description, config, projectId) -> {
String destination = (String) config.remove("destination");
return new TestProcessor(
tag,
"reroute",
description,
(Consumer<IngestDocument>) ingestDocument -> ingestDocument.reroute(destination)
);
});
processors.put(
"fail",
(processorFactories, tag, description, config, projectId) -> new TestProcessor(
tag,
"fail",
description,
new RuntimeException()
)
);
return processors;
}
}
}
| CustomIngestTestPlugin |
java | quarkusio__quarkus | extensions/kafka-client/runtime/src/main/java/io/quarkus/kafka/client/tls/QuarkusKafkaSslEngineFactory.java | {
"start": 721,
"end": 5272
} | class ____ implements SslEngineFactory {
private static final Logger log = Logger.getLogger(QuarkusKafkaSslEngineFactory.class);
/**
* Omits 'ssl.endpoint.identification.algorithm' because it is set by the user and it is not ignored
*/
private static final Set<String> KAFKA_SSL_CONFIGS = Set.of(
SslConfigs.SSL_KEYSTORE_TYPE_CONFIG,
SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG,
SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG,
SslConfigs.SSL_KEY_PASSWORD_CONFIG,
SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG,
SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,
SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG,
SslConfigs.SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG,
SslConfigs.SSL_KEYSTORE_KEY_CONFIG,
SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_CONFIG,
SslConfigs.SSL_PROTOCOL_CONFIG,
SslConfigs.SSL_PROVIDER_CONFIG,
SslConfigs.SSL_CIPHER_SUITES_CONFIG,
SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG,
SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG,
SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG,
SslConfigs.SSL_SECURE_RANDOM_IMPLEMENTATION_CONFIG);
private TlsConfiguration configuration;
private SSLContext sslContext;
@Override
public SSLEngine createClientSslEngine(String peerHost, int peerPort, String endpointIdentification) {
SSLEngine sslEngine = sslContext.createSSLEngine(peerHost, peerPort);
sslEngine.setUseClientMode(true);
SSLParameters sslParameters = sslEngine.getSSLParameters();
sslParameters.setEndpointIdentificationAlgorithm(endpointIdentification);
sslEngine.setSSLParameters(sslParameters);
return sslEngine;
}
@Override
public SSLEngine createServerSslEngine(String peerHost, int peerPort) {
throw new IllegalStateException("Server mode is not supported");
}
@Override
public boolean shouldBeRebuilt(Map<String, Object> nextConfigs) {
return false;
}
@Override
public Set<String> reconfigurableConfigs() {
return Set.of();
}
@Override
public KeyStore keystore() {
return configuration.getKeyStore();
}
@Override
public KeyStore truststore() {
return configuration.getTrustStore();
}
@Override
public void close() throws IOException {
this.sslContext = null;
this.configuration = null;
}
@Override
public void configure(Map<String, ?> configs) {
String tlsConfigName = (String) configs.get(TLS_CONFIG_NAME_KEY);
if (tlsConfigName == null) {
throw new IllegalArgumentException(
"The 'tls-configuration-name' property is required for Kafka Quarkus TLS Registry integration.");
}
Instance<TlsConfigurationRegistry> tlsConfig = CDI.current().getBeanManager().createInstance()
.select(TlsConfigurationRegistry.class);
if (!tlsConfig.isUnsatisfied()) {
TlsConfigurationRegistry registry = tlsConfig.get();
configuration = registry.get(tlsConfigName)
.orElseThrow(() -> new IllegalArgumentException("No TLS configuration found for name " + tlsConfigName));
try {
sslContext = configuration.createSSLContext();
} catch (Exception e) {
throw new RuntimeException("Failed to create SSLContext", e);
}
String clientId = (String) configs.get(CommonClientConfigs.CLIENT_ID_CONFIG);
log.debugf("Configured Kafka client '%s' QuarkusKafkaSslEngineFactory with TLS configuration : %s",
clientId, tlsConfigName);
}
}
/**
* Check if any SSL configuration is set for the Kafka client that will be ignored because the TLS configuration is set
*
* @param configs the Kafka client configuration
*/
public static void checkForOtherSslConfigs(Map<String, ?> configs) {
String tlsConfigName = (String) configs.get(TLS_CONFIG_NAME_KEY);
for (String sslConfig : KAFKA_SSL_CONFIGS) {
if (configs.containsKey(sslConfig)) {
log.warnf(
"The SSL configuration '%s' is set for Kafka client '%s' but it will be ignored because the TLS configuration '%s' is set",
sslConfig, configs.get(CommonClientConfigs.CLIENT_ID_CONFIG), tlsConfigName);
}
}
}
}
| QuarkusKafkaSslEngineFactory |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/include/IncludeTest.java | {
"start": 1645,
"end": 2183
} | class ____ {
static native TemplateInstance test(TemplateContext context);
}
@Inject
Template detail;
@Test
public void testIncludeSection() {
assertEquals("OK:my foo", detail.render());
}
@Test
public void testCheckedTemplate() {
assertEquals("<body>\n"
+ "Hello from test template\n"
+ "<p>\n"
+ " Context data: Hello\n"
+ "</body>", Templates.test(new TemplateContext()).render());
}
public static | Templates |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_issue_555_setter.java | {
"start": 443,
"end": 715
} | class ____ {
@JSONField(serialize = true, deserialize = false)
private Spec spec;
public Spec getSpec() {
return spec;
}
public void setSpec(Spec spec) {
this.spec = spec;
}
}
public static | B |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/superbuilder/PassengerDto.java | {
"start": 200,
"end": 386
} | class ____ {
private final String name;
public PassengerDto(String name) {
this.name = name;
}
public String getName() {
return name;
}
}
| PassengerDto |
java | elastic__elasticsearch | client/rest/src/main/java/org/elasticsearch/client/PreferHasAttributeNodeSelector.java | {
"start": 1295,
"end": 3198
} | class ____ implements NodeSelector {
private final String key;
private final String value;
public PreferHasAttributeNodeSelector(String key, String value) {
this.key = key;
this.value = value;
}
@Override
public void select(Iterable<Node> nodes) {
boolean foundAtLeastOne = false;
for (Node node : nodes) {
Map<String, List<String>> attributes = node.getAttributes();
if (attributes == null) {
continue;
}
List<String> values = attributes.get(key);
if (values == null) {
continue;
}
if (values.contains(value)) {
foundAtLeastOne = true;
break;
}
}
if (foundAtLeastOne) {
Iterator<Node> nodeIterator = nodes.iterator();
while (nodeIterator.hasNext()) {
Map<String, List<String>> attributes = nodeIterator.next().getAttributes();
if (attributes == null) {
continue;
}
List<String> values = attributes.get(key);
if (values == null || values.contains(value) == false) {
nodeIterator.remove();
}
}
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
PreferHasAttributeNodeSelector that = (PreferHasAttributeNodeSelector) o;
return Objects.equals(key, that.key) && Objects.equals(value, that.value);
}
@Override
public int hashCode() {
return Objects.hash(key, value);
}
@Override
public String toString() {
return key + "=" + value;
}
}
| PreferHasAttributeNodeSelector |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java | {
"start": 1712,
"end": 6174
} | class ____ {
/**
* Create a new instance of <code>RegisterApplicationMasterRequest</code>.
* If <em>port, trackingUrl</em> is not used, use the following default value:
* <ul>
* <li>port: -1</li>
* <li>trackingUrl: null</li>
* </ul>
* The port is allowed to be any integer larger than or equal to -1.
* @param host host on which the ApplicationMaster is running.
* @param port the RPC port on which the ApplicationMaster is responding.
* @param trackingUrl tracking URL for the ApplicationMaster.
* @return the new instance of <code>RegisterApplicationMasterRequest</code>
*/
@Public
@Stable
public static RegisterApplicationMasterRequest newInstance(String host,
int port, String trackingUrl) {
RegisterApplicationMasterRequest request =
Records.newRecord(RegisterApplicationMasterRequest.class);
request.setHost(host);
request.setRpcPort(port);
request.setTrackingUrl(trackingUrl);
return request;
}
/**
* Get the <em>host</em> on which the <code>ApplicationMaster</code> is
* running.
* @return <em>host</em> on which the <code>ApplicationMaster</code> is running
*/
@Public
@Stable
public abstract String getHost();
/**
* Set the <em>host</em> on which the <code>ApplicationMaster</code> is
* running.
* @param host <em>host</em> on which the <code>ApplicationMaster</code>
* is running
*/
@Public
@Stable
public abstract void setHost(String host);
/**
* Get the <em>RPC port</em> on which the {@code ApplicationMaster} is
* responding.
* @return the <em>RPC port</em> on which the {@code ApplicationMaster}
* is responding
*/
@Public
@Stable
public abstract int getRpcPort();
/**
* Set the <em>RPC port</em> on which the {@code ApplicationMaster} is
* responding.
* @param port <em>RPC port</em> on which the {@code ApplicationMaster}
* is responding
*/
@Public
@Stable
public abstract void setRpcPort(int port);
/**
* Get the <em>tracking URL</em> for the <code>ApplicationMaster</code>.
* This url if contains scheme then that will be used by resource manager
* web application proxy otherwise it will default to http.
* @return <em>tracking URL</em> for the <code>ApplicationMaster</code>
*/
@Public
@Stable
public abstract String getTrackingUrl();
/**
* Set the <em>tracking URL</em>for the <code>ApplicationMaster</code> while
* it is running. This is the web-URL to which ResourceManager or
* web-application proxy will redirect client/users while the application and
* the <code>ApplicationMaster</code> are still running.
* <p>
* If the passed url has a scheme then that will be used by the
* ResourceManager and web-application proxy, otherwise the scheme will
* default to http.
* </p>
* <p>
* Empty, null, "N/A" strings are all valid besides a real URL. In case an url
* isn't explicitly passed, it defaults to "N/A" on the ResourceManager.
* <p>
*
* @param trackingUrl
* <em>tracking URL</em>for the <code>ApplicationMaster</code>
*/
@Public
@Stable
public abstract void setTrackingUrl(String trackingUrl);
/**
* Return all Placement Constraints specified at the Application level. The
* mapping is from a set of allocation tags to a
* <code>PlacementConstraint</code> associated with the tags, i.e., each
* {@link org.apache.hadoop.yarn.api.records.SchedulingRequest} that has those
* tags will be placed taking into account the corresponding constraint.
*
* @return A map of Placement Constraints.
*/
@Public
@Unstable
public Map<Set<String>, PlacementConstraint> getPlacementConstraints() {
return new HashMap<>();
}
/**
* Set Placement Constraints applicable to the
* {@link org.apache.hadoop.yarn.api.records.SchedulingRequest}s
* of this application.
* The mapping is from a set of allocation tags to a
* <code>PlacementConstraint</code> associated with the tags.
* For example:
* Map <
* <hb_regionserver> -> node_anti_affinity,
* <hb_regionserver, hb_master> -> rack_affinity,
* ...
* >
* @param placementConstraints Placement Constraint Mapping.
*/
@Public
@Unstable
public void setPlacementConstraints(
Map<Set<String>, PlacementConstraint> placementConstraints) {
}
}
| RegisterApplicationMasterRequest |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/ProgrammaticExtensionRegistrationTests.java | {
"start": 21238,
"end": 21919
} | class ____ implements TestInstancePostProcessor {
private static final Predicate<Field> isCrystalBall = field -> CrystalBall.class.isAssignableFrom(
field.getType());
@Override
public void postProcessTestInstance(Object testInstance, ExtensionContext context) {
// @formatter:off
findAnnotatedFields(testInstance.getClass(), RegisterExtension.class, isCrystalBall).stream()
.findFirst()
.ifPresent(field -> {
try {
makeAccessible(field).set(testInstance, new CrystalBall("Outlook good"));
}
catch (Throwable t) {
throw ExceptionUtils.throwAsUncheckedException(t);
}
});
// @formatter:on
}
}
static | ExtensionInjector |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/health/MyFooHealthCheckTest.java | {
"start": 1192,
"end": 2545
} | class ____ extends ContextTestSupport {
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testMyFoo() {
context.start();
HealthCheck hc = PluginHelper.getHealthCheckResolver(context).resolveHealthCheck("myfoo");
Assertions.assertNotNull(hc);
Assertions.assertEquals("acme", hc.getGroup());
Assertions.assertEquals("myfoo", hc.getId());
HealthCheck.Result r = hc.call();
Assertions.assertEquals(HealthCheck.State.DOWN, r.getState());
Assertions.assertEquals("Chaos Monkey was here", r.getMessage().get());
}
@Test
public void testAddToRegistry() {
context.start();
HealthCheck hc = PluginHelper.getHealthCheckResolver(context).resolveHealthCheck("myfoo");
Assertions.assertNotNull(hc);
HealthCheckRegistry hcr = context.getCamelContextExtension().getContextPlugin(HealthCheckRegistry.class);
hcr.register(hc);
Collection<HealthCheck.Result> col = HealthCheckHelper.invoke(context);
Assertions.assertEquals(1, col.size());
HealthCheck.Result r = col.iterator().next();
Assertions.assertEquals(HealthCheck.State.DOWN, r.getState());
Assertions.assertEquals("Chaos Monkey was here", r.getMessage().get());
}
}
| MyFooHealthCheckTest |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/main/java/org/springframework/boot/devtools/livereload/ConnectionInputStream.java | {
"start": 855,
"end": 3087
} | class ____ extends FilterInputStream {
private static final String HEADER_END = "\r\n\r\n";
private static final int BUFFER_SIZE = 4096;
ConnectionInputStream(InputStream in) {
super(in);
}
/**
* Read the HTTP header from the {@link InputStream}. Note: This method doesn't expect
* any HTTP content after the header since the initial request is usually just a
* WebSocket upgrade.
* @return the HTTP header
* @throws IOException in case of I/O errors
*/
String readHeader() throws IOException {
byte[] buffer = new byte[BUFFER_SIZE];
StringBuilder content = new StringBuilder(BUFFER_SIZE);
while (content.indexOf(HEADER_END) == -1) {
int amountRead = checkedRead(buffer, 0, BUFFER_SIZE);
content.append(new String(buffer, 0, amountRead));
}
return content.substring(0, content.indexOf(HEADER_END));
}
/**
* Repeatedly read the underlying {@link InputStream} until the requested number of
* bytes have been loaded.
* @param buffer the destination buffer
* @param offset the buffer offset
* @param length the amount of data to read
* @throws IOException in case of I/O errors
*/
void readFully(byte[] buffer, int offset, int length) throws IOException {
while (length > 0) {
int amountRead = checkedRead(buffer, offset, length);
offset += amountRead;
length -= amountRead;
}
}
/**
* Read a single byte from the stream (checking that the end of the stream hasn't been
* reached).
* @return the content
* @throws IOException in case of I/O errors
*/
int checkedRead() throws IOException {
int b = read();
if (b == -1) {
throw new IOException("End of stream");
}
return (b & 0xff);
}
/**
* Read a number of bytes from the stream (checking that the end of the stream hasn't
* been reached).
* @param buffer the destination buffer
* @param offset the buffer offset
* @param length the length to read
* @return the amount of data read
* @throws IOException in case of I/O errors
*/
int checkedRead(byte[] buffer, int offset, int length) throws IOException {
int amountRead = read(buffer, offset, length);
if (amountRead == -1) {
throw new IOException("End of stream");
}
return amountRead;
}
}
| ConnectionInputStream |
java | redisson__redisson | redisson/src/main/java/org/redisson/client/protocol/decoder/StreamResultDecoder.java | {
"start": 894,
"end": 3139
} | class ____ implements MultiDecoder<Object> {
private final boolean firstResult;
public StreamResultDecoder(boolean firstResult) {
super();
this.firstResult = firstResult;
}
@Override
public Object decode(List<Object> parts, State state) {
List<List<Object>> list = (List<List<Object>>) (Object) parts;
// Map<String, Map<StreamMessageId, Map<Object, Object>>> result = list.stream().collect(
// Collectors.groupingBy(v -> (String) v.get(0),
// Collectors.mapping(v -> (List<List<Object>>) v.get(1),
// Collector.of(LinkedHashMap::new,
// (m, l) -> {
// for (List<Object> objects : l) {
// m.put((StreamMessageId) objects.get(0), (Map<Object, Object>) objects.get(1));
// }
// },
// (x, y) -> {
// x.putAll(y);
// return x;
// })
// )));
//
// result.values().removeAll(Collections.singleton(new HashMap()));
//
// if (firstResult && !result.isEmpty()) {
// return result.values().iterator().next();
// }
// return result;
Map<String, Map<StreamMessageId, Map<Object, Object>>> result = new HashMap<>();
for (List<Object> entries : list) {
List<List<Object>> streamEntries = (List<List<Object>>) entries.get(1);
if (!streamEntries.isEmpty()) {
String name = (String) entries.get(0);
Map<StreamMessageId, Map<Object, Object>> ee = new LinkedHashMap<>();
result.put(name, ee);
for (List<Object> se : streamEntries) {
ee.put((StreamMessageId) se.get(0), (Map<Object, Object>) se.get(1));
}
if (firstResult) {
return ee;
}
}
}
return result;
}
}
| StreamResultDecoder |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/indices/AbstractIndexShardCacheEntity.java | {
"start": 753,
"end": 854
} | class ____ the an {@link IndexShard} level {@linkplain IndicesRequestCache.CacheEntity}.
*/
abstract | for |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/intarrays/IntArrays_assertNullOrEmpty_Test.java | {
"start": 1344,
"end": 1973
} | class ____ extends IntArraysBaseTest {
@Test
void should_fail_if_array_is_not_null_and_is_not_empty() {
AssertionInfo info = someInfo();
int[] actual = { 6, 8 };
Throwable error = catchThrowable(() -> arrays.assertNullOrEmpty(info, actual));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeNullOrEmpty(actual));
}
@Test
void should_pass_if_array_is_null() {
arrays.assertNullOrEmpty(someInfo(), null);
}
@Test
void should_pass_if_array_is_empty() {
arrays.assertNullOrEmpty(someInfo(), emptyArray());
}
}
| IntArrays_assertNullOrEmpty_Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/AttributeConverterTest.java | {
"start": 21303,
"end": 21708
} | class ____ implements AttributeConverter<Integer,String> {
@Override
public String convertToDatabaseColumn(Integer attribute) {
return attribute == null ? null : attribute.toString();
}
@Override
public Integer convertToEntityAttribute(String dbData) {
return dbData == null ? null : Integer.valueOf( dbData );
}
}
@Converter( autoApply = true )
public static | IntegerToVarcharConverter |
java | google__guice | extensions/assistedinject/test/com/google/inject/assistedinject/FactoryProviderTest.java | {
"start": 4568,
"end": 4840
} | class ____ implements Car {
private final double engineSize;
private final Color color;
@AssistedInject
public Mustang(double engineSize, @Assisted Color color) {
this.engineSize = engineSize;
this.color = color;
}
}
public static | Mustang |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/ServiceAccountInfo.java | {
"start": 708,
"end": 2541
} | class ____ implements Writeable, ToXContent {
private final String principal;
private final RoleDescriptor roleDescriptor;
public ServiceAccountInfo(String principal, RoleDescriptor roleDescriptor) {
this.principal = Objects.requireNonNull(principal, "service account principal cannot be null");
this.roleDescriptor = Objects.requireNonNull(roleDescriptor, "service account descriptor cannot be null");
}
public ServiceAccountInfo(StreamInput in) throws IOException {
this.principal = in.readString();
this.roleDescriptor = new RoleDescriptor(in);
}
public String getPrincipal() {
return principal;
}
public RoleDescriptor getRoleDescriptor() {
return roleDescriptor;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(principal);
roleDescriptor.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(principal);
builder.field("role_descriptor");
roleDescriptor.toXContent(builder, params);
builder.endObject();
return builder;
}
@Override
public String toString() {
return "ServiceAccountInfo{" + "principal='" + principal + '\'' + ", roleDescriptor=" + roleDescriptor + '}';
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ServiceAccountInfo that = (ServiceAccountInfo) o;
return principal.equals(that.principal) && roleDescriptor.equals(that.roleDescriptor);
}
@Override
public int hashCode() {
return Objects.hash(principal, roleDescriptor);
}
}
| ServiceAccountInfo |
java | quarkusio__quarkus | extensions/reactive-routes/deployment/src/test/java/io/quarkustest/execannotations/ExecAnnotationInvalidTest.java | {
"start": 932,
"end": 1046
} | class ____ {
@Blocking
String hello() {
return "Hello world!";
}
}
}
| MyService |
java | apache__flink | flink-docs/src/test/java/org/apache/flink/docs/rest/data/TestExcludeMessageHeaders.java | {
"start": 1661,
"end": 3250
} | class ____
implements RuntimeMessageHeaders<
EmptyRequestBody, EmptyResponseBody, EmptyMessageParameters> {
private static final String URL = "/test/excluded";
private static final String DESCRIPTION =
"This REST API should not appear in the generated documentation.";
private final String url;
private final String description;
public TestExcludeMessageHeaders() {
this.url = URL;
this.description = DESCRIPTION;
}
public TestExcludeMessageHeaders(String url, String description) {
this.url = URL;
this.description = DESCRIPTION;
}
@Override
public Class<EmptyRequestBody> getRequestClass() {
return EmptyRequestBody.class;
}
@Override
public Class<EmptyResponseBody> getResponseClass() {
return EmptyResponseBody.class;
}
@Override
public HttpMethodWrapper getHttpMethod() {
return HttpMethodWrapper.GET;
}
@Override
public HttpResponseStatus getResponseStatusCode() {
return HttpResponseStatus.OK;
}
@Override
public String getDescription() {
return description;
}
@Override
public EmptyMessageParameters getUnresolvedMessageParameters() {
return EmptyMessageParameters.getInstance();
}
@Override
public String getTargetRestEndpointURL() {
return URL;
}
@Override
public Collection<RuntimeRestAPIVersion> getSupportedAPIVersions() {
return Collections.singleton(RuntimeRestAPIVersion.V0);
}
}
| TestExcludeMessageHeaders |
java | quarkusio__quarkus | extensions/oidc-client-reactive-filter/deployment/src/test/java/io/quarkus/oidc/client/reactive/filter/ExtendedOidcClientReactiveFilterDevModeTest.java | {
"start": 468,
"end": 4461
} | class ____ {
private static final Class<?>[] testClasses = {
ProtectedResource.class,
ProtectedResourceServiceAnnotationOidcClient.class,
ProtectedResourceServiceConfigPropertyOidcClient.class,
ProtectedResourceServiceExtendedOidcClientRequestReactiveFilter.class,
ExtendedOidcClientRequestReactiveFilter.class,
ExtendedOidcClientRequestReactiveFilterResource.class,
ClientWebApplicationExceptionMapper.class
};
@RegisterExtension
static final QuarkusDevModeTest test = new QuarkusDevModeTest()
.withApplicationRoot((jar) -> jar
.addClasses(testClasses)
.addAsResource("application-extended-oidc-client-reactive-filter.properties", "application.properties"));
@Test
public void testGerUserConfigPropertyAndAnnotation() {
// test OidcClientFilter with OidcClient selected via annotation or config-property
// Client feature is disabled
// OidcClient selected via @OidcClient("clientName")
RestAssured.when().get("/oidc-client/annotation/user-name")
.then()
.statusCode(401);
RestAssured.when().get("/oidc-client/annotation/anonymous-user-name")
.then()
.statusCode(204)
.body(equalTo(""));
// @OidcClientFilter: OidcClient selected via `quarkus.oidc-client-filter.client-name=config-property`
RestAssured.when().get("/oidc-client/config-property/user-name")
.then()
.statusCode(401);
RestAssured.when().get("/oidc-client/config-property/anonymous-user-name")
.then()
.statusCode(204)
.body(equalTo(""));
// @RegisterProvider(OidcClientRequestReactiveFilter.class): OidcClient selected via `quarkus.oidc-client-filter.client-name=config-property`
RestAssured.when().get("/oidc-client/custom-provider-config-property/user-name")
.then()
.statusCode(401);
RestAssured.when().get("/oidc-client/custom-provider-config-property/anonymous-user-name")
.then()
.statusCode(204)
.body(equalTo(""));
test.modifyResourceFile("application.properties", s -> s.replace(".enabled=false", ".enabled=true"));
// Client feature is enabled
// OidcClient selected via @OidcClient("clientName")
RestAssured.when().get("/oidc-client/annotation/user-name")
.then()
.statusCode(200)
.body(equalTo("jdoe"));
RestAssured.when().get("/oidc-client/annotation/anonymous-user-name")
.then()
.statusCode(200)
.body(equalTo("jdoe"));
// @OidcClientFilter: OidcClient selected via `quarkus.oidc-client-filter.client-name=config-property`
RestAssured.when().get("/oidc-client/config-property/user-name")
.then()
.statusCode(200)
.body(equalTo("alice"));
RestAssured.when().get("/oidc-client/config-property/anonymous-user-name")
.then()
.statusCode(200)
.body(equalTo("alice"));
// @RegisterProvider(ExtendedOidcClientRequestReactiveFilter.class)
// OidcClient selected via `quarkus.oidc-client-filter.client-name=config-property`
// ExtendedOidcClientRequestReactiveFilter extends OidcClientRequestReactiveFilter
RestAssured.when().get("/oidc-client/custom-provider-config-property/user-name")
.then()
.statusCode(200)
.body(equalTo("alice"));
RestAssured.when().get("/oidc-client/custom-provider-config-property/anonymous-user-name")
.then()
.statusCode(200)
.body(equalTo("alice"));
}
}
| ExtendedOidcClientReactiveFilterDevModeTest |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/cluster/ReadFromUnitTests.java | {
"start": 1747,
"end": 14681
} | class ____ {
private Partitions sut = new Partitions();
private RedisClusterNode nearest = new RedisClusterNode();
private RedisClusterNode master = new RedisClusterNode();
private RedisClusterNode replica = new RedisClusterNode();
@BeforeEach
void before() {
master.setFlags(Collections.singleton(RedisClusterNode.NodeFlag.UPSTREAM));
nearest.setFlags(Collections.singleton(RedisClusterNode.NodeFlag.REPLICA));
replica.setFlags(Collections.singleton(RedisClusterNode.NodeFlag.REPLICA));
sut.addPartition(nearest);
sut.addPartition(master);
sut.addPartition(replica);
}
@Test
void master() {
List<RedisNodeDescription> result = ReadFrom.UPSTREAM.select(getNodes());
assertThat(result).hasSize(1).containsOnly(master);
}
@Test
void masterPreferred() {
List<RedisNodeDescription> result = ReadFrom.UPSTREAM_PREFERRED.select(getNodes());
assertThat(result).hasSize(3).containsExactly(master, nearest, replica);
}
@Test
void replica() {
List<RedisNodeDescription> result = ReadFrom.REPLICA.select(getNodes());
assertThat(result).hasSize(2).contains(nearest, replica);
}
@Test
void replicaPreferred() {
List<RedisNodeDescription> result = ReadFrom.REPLICA_PREFERRED.select(getNodes());
assertThat(result).hasSize(3).containsExactly(nearest, replica, master);
}
@Test
void nearest() {
List<RedisNodeDescription> result = ReadFrom.NEAREST.select(getNodes());
assertThat(result).hasSize(3).containsExactly(nearest, master, replica);
}
@Test
void anyReplica() {
List<RedisNodeDescription> result = ReadFrom.ANY_REPLICA.select(getNodes());
assertThat(result).hasSize(2).containsExactly(nearest, replica);
}
@Test
void subnetIpv4RuleIpv6NodeGiven() {
ReadFrom sut = ReadFrom.subnet("0.0.0.0/0");
RedisClusterNode ipv6node = createNodeWithHost("2001:db8:abcd:1000::");
List<RedisNodeDescription> result = sut.select(getNodes(ipv6node));
assertThat(result).isEmpty();
}
@Test
void subnetIpv4RuleAnyNode() {
ReadFrom sut = ReadFrom.subnet("0.0.0.0/0");
RedisClusterNode node = createNodeWithHost("192.0.2.1");
List<RedisNodeDescription> result = sut.select(getNodes(node));
assertThat(result).hasSize(1).containsExactly(node);
}
@Test
void subnetIpv6RuleIpv4NodeGiven() {
ReadFrom sut = ReadFrom.subnet("::/0");
RedisClusterNode node = createNodeWithHost("192.0.2.1");
List<RedisNodeDescription> result = sut.select(getNodes(node));
assertThat(result).isEmpty();
}
@Test
void subnetIpv6RuleAnyNode() {
ReadFrom sut = ReadFrom.subnet("::/0");
RedisClusterNode node = createNodeWithHost("2001:db8:abcd:1000::");
List<RedisNodeDescription> result = sut.select(getNodes(node));
assertThat(result).hasSize(1).containsExactly(node);
}
@Test
void subnetIpv4Ipv6Mixed() {
ReadFrom sut = ReadFrom.subnet("192.0.2.0/24", "2001:db8:abcd:0000::/52");
RedisClusterNode nodeInSubnetIpv4 = createNodeWithHost("192.0.2.1");
RedisClusterNode nodeNotInSubnetIpv4 = createNodeWithHost("198.51.100.1");
RedisClusterNode nodeInSubnetIpv6 = createNodeWithHost("2001:db8:abcd:0000::1");
RedisClusterNode nodeNotInSubnetIpv6 = createNodeWithHost("2001:db8:abcd:1000::");
List<RedisNodeDescription> result = sut
.select(getNodes(nodeInSubnetIpv4, nodeNotInSubnetIpv4, nodeInSubnetIpv6, nodeNotInSubnetIpv6));
assertThat(result).hasSize(2).containsExactly(nodeInSubnetIpv4, nodeInSubnetIpv6);
}
@Test
void subnetNodeWithHostname() {
ReadFrom sut = ReadFrom.subnet("0.0.0.0/0");
RedisClusterNode hostNode = createNodeWithHost("example.com");
RedisClusterNode localhostNode = createNodeWithHost("localhost");
List<RedisNodeDescription> result = sut.select(getNodes(hostNode, localhostNode));
assertThat(result).isEmpty();
}
@Test
void subnetCidrValidation() {
// malformed CIDR notation
assertThatThrownBy(() -> ReadFrom.subnet("192.0.2.1//1")).isInstanceOf(IllegalArgumentException.class);
assertThatThrownBy(() -> ReadFrom.subnet("2001:db8:abcd:0000:://52")).isInstanceOf(IllegalArgumentException.class);
// malformed ipAddress
assertThatThrownBy(() -> ReadFrom.subnet("foo.bar/12")).isInstanceOf(IllegalArgumentException.class);
assertThatThrownBy(() -> ReadFrom.subnet("zzzz:db8:abcd:0000:://52")).isInstanceOf(IllegalArgumentException.class);
// malformed cidrPrefix
assertThatThrownBy(() -> ReadFrom.subnet("192.0.2.1/40")).isInstanceOf(IllegalArgumentException.class);
assertThatThrownBy(() -> ReadFrom.subnet("192.0.2.1/foo")).isInstanceOf(IllegalArgumentException.class);
assertThatThrownBy(() -> ReadFrom.subnet("2001:db8:abcd:0000/129")).isInstanceOf(IllegalArgumentException.class);
assertThatThrownBy(() -> ReadFrom.subnet("2001:db8:abcd:0000/-1")).isInstanceOf(IllegalArgumentException.class);
// acceptable cidrPrefix
assertDoesNotThrow(() -> ReadFrom.subnet("0.0.0.0/0"));
assertDoesNotThrow(() -> ReadFrom.subnet("0.0.0.0/32"));
assertDoesNotThrow(() -> ReadFrom.subnet("::/0"));
assertDoesNotThrow(() -> ReadFrom.subnet("::/128"));
}
@Test
void regex() {
ReadFrom sut = ReadFrom.regex(Pattern.compile(".*region-1.*"));
RedisClusterNode node1 = createNodeWithHost("redis-node-1.region-1.example.com");
RedisClusterNode node2 = createNodeWithHost("redis-node-2.region-1.example.com");
RedisClusterNode node3 = createNodeWithHost("redis-node-1.region-2.example.com");
RedisClusterNode node4 = createNodeWithHost("redis-node-2.region-2.example.com");
List<RedisNodeDescription> result = sut.select(getNodes(node1, node2, node3, node4));
assertThat(result).hasSize(2).containsExactly(node1, node2);
}
private RedisClusterNode createNodeWithHost(String host) {
RedisClusterNode node = new RedisClusterNode();
node.setUri(RedisURI.Builder.redis(host).build());
return node;
}
@Test
void valueOfNull() {
assertThatThrownBy(() -> ReadFrom.valueOf(null)).isInstanceOf(IllegalArgumentException.class);
}
@Test
void valueOfUnknown() {
assertThatThrownBy(() -> ReadFrom.valueOf("unknown")).isInstanceOf(IllegalArgumentException.class);
}
@ParameterizedTest
@ValueSource(strings = { "NEAREST", "nearest", "Nearest" })
void valueOfNearest(String name) {
assertThat(ReadFrom.valueOf(name)).isEqualTo(ReadFrom.NEAREST);
}
@ParameterizedTest
@ValueSource(strings = { "lowestLatency", "lowestlatency", "LOWESTLATENCY" })
void valueOfLowestLatency(String name) {
assertThat(ReadFrom.valueOf(name)).isEqualTo(ReadFrom.LOWEST_LATENCY);
}
@ParameterizedTest
@ValueSource(strings = { "MASTER", "master", "Master" })
void valueOfMaster(String name) {
assertThat(ReadFrom.valueOf(name)).isEqualTo(ReadFrom.UPSTREAM);
}
@ParameterizedTest
@ValueSource(strings = { "masterPreferred", "masterpreferred", "MASTERPREFERRED" })
void valueOfMasterPreferred(String name) {
assertThat(ReadFrom.valueOf(name)).isEqualTo(ReadFrom.UPSTREAM_PREFERRED);
}
@ParameterizedTest
@ValueSource(strings = { "slave", "SLAVE", "Slave" })
void valueOfSlave(String name) {
assertThat(ReadFrom.valueOf(name)).isEqualTo(ReadFrom.REPLICA);
}
@ParameterizedTest
@ValueSource(strings = { "slavePreferred", "slavepreferred", "SLAVEPREFERRED" })
void valueOfSlavePreferred(String name) {
assertThat(ReadFrom.valueOf(name)).isEqualTo(ReadFrom.REPLICA_PREFERRED);
}
@ParameterizedTest
@ValueSource(strings = { "replicaPreferred", "replicapreferred", "REPLICAPREFERRED" })
void valueOfReplicaPreferred(String name) {
assertThat(ReadFrom.valueOf(name)).isEqualTo(ReadFrom.REPLICA_PREFERRED);
}
@ParameterizedTest
@ValueSource(strings = { "anyReplica", "anyreplica", "ANYREPLICA" })
void valueOfAnyReplica(String name) {
assertThat(ReadFrom.valueOf(name)).isEqualTo(ReadFrom.ANY_REPLICA);
}
@Test
void valueOfSubnetWithEmptyCidrNotations() {
assertThatThrownBy(() -> ReadFrom.valueOf("subnet")).isInstanceOf(IllegalArgumentException.class);
}
@ParameterizedTest
@ValueSource(strings = { "subnet:192.0.2.0/24,2001:db8:abcd:0000::/52", "SUBNET:192.0.2.0/24,2001:db8:abcd:0000::/52" })
void valueOfSubnet(String name) {
RedisClusterNode nodeInSubnetIpv4 = createNodeWithHost("192.0.2.1");
RedisClusterNode nodeNotInSubnetIpv4 = createNodeWithHost("198.51.100.1");
RedisClusterNode nodeInSubnetIpv6 = createNodeWithHost("2001:db8:abcd:0000::1");
RedisClusterNode nodeNotInSubnetIpv6 = createNodeWithHost("2001:db8:abcd:1000::");
ReadFrom sut = ReadFrom.valueOf(name);
List<RedisNodeDescription> result = sut
.select(getNodes(nodeInSubnetIpv4, nodeNotInSubnetIpv4, nodeInSubnetIpv6, nodeNotInSubnetIpv6));
assertThat(result).hasSize(2).containsExactly(nodeInSubnetIpv4, nodeInSubnetIpv6);
}
@Test
void valueOfRegexWithEmptyRegexValue() {
    // "regex" with no pattern after it must be rejected.
    String input = "regex";
    assertThatThrownBy(() -> ReadFrom.valueOf(input)).isInstanceOf(IllegalArgumentException.class);
}
@ParameterizedTest
@ValueSource(strings = { "regex:.*region-1.*", "REGEX:.*region-1.*" })
void valueOfRegex(String name) {
    // Only hosts whose name matches the pattern are selected; selection is not order-sensitive.
    ReadFrom regexReadFrom = ReadFrom.valueOf(name);

    RedisClusterNode regionOneA = createNodeWithHost("redis-node-1.region-1.example.com");
    RedisClusterNode regionOneB = createNodeWithHost("redis-node-2.region-1.example.com");
    RedisClusterNode regionTwoA = createNodeWithHost("redis-node-1.region-2.example.com");
    RedisClusterNode regionTwoB = createNodeWithHost("redis-node-2.region-2.example.com");

    List<RedisNodeDescription> selected = regexReadFrom.select(getNodes(regionOneA, regionOneB, regionTwoA, regionTwoB));

    assertThat(regexReadFrom).hasFieldOrPropertyWithValue("orderSensitive", false);
    assertThat(selected).hasSize(2).containsExactly(regionOneA, regionOneB);
}
@ParameterizedTest
@ValueSource(strings = { "REPLICA", "replica", "Replica" })
void valueOfReplica(String name) {
    // "replica" parses case-insensitively to REPLICA.
    ReadFrom resolved = ReadFrom.valueOf(name);
    assertThat(resolved).isEqualTo(ReadFrom.REPLICA);
}
@ParameterizedTest
@ValueSource(strings = { "UPSTREAM", "upstream", "Upstream" })
void valueOfUpstream(String name) {
    // "upstream" parses case-insensitively to UPSTREAM.
    ReadFrom resolved = ReadFrom.valueOf(name);
    assertThat(resolved).isEqualTo(ReadFrom.UPSTREAM);
}
@ParameterizedTest
@ValueSource(strings = { "upstreamPreferred", "UPSTREAMPREFERRED", "UpstreamPreferred" })
void valueOfUpstreamPreferred(String name) {
    // "upstreamPreferred" parses case-insensitively to UPSTREAM_PREFERRED.
    ReadFrom resolved = ReadFrom.valueOf(name);
    assertThat(resolved).isEqualTo(ReadFrom.UPSTREAM_PREFERRED);
}
@Test
void valueOfWhenNameIsPresentButValueIsAbsent() {
    // A recognised type with nothing after the colon is rejected with a descriptive message.
    String input = "subnet:";
    assertThatThrownBy(() -> ReadFrom.valueOf(input)).isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining("Value must not be empty for the type 'subnet'");
}
@Test
void valueOfWhenNameIsEmptyButValueIsPresent() {
    // A value with an empty type prefix is not a supported ReadFrom spelling.
    String input = ":192.0.2.0/24";
    assertThatThrownBy(() -> ReadFrom.valueOf(input)).isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining("ReadFrom :192.0.2.0/24 not supported");
}
@Test
void valueOfRegexWithInvalidPatternShouldThrownIllegalArgumentException() {
    // A syntactically invalid regex (trailing backslash) is rejected at parse time.
    String input = "regex:\\";
    assertThatThrownBy(() -> ReadFrom.valueOf(input)).isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining("is not a valid regular expression");
}
@ParameterizedTest
@ValueSource(strings = { "ANY", "any", "Any" })
void valueOfAny(String name) {
    // "any" parses case-insensitively to ANY.
    ReadFrom resolved = ReadFrom.valueOf(name);
    assertThat(resolved).isEqualTo(ReadFrom.ANY);
}
/**
 * Adapts the partitions of the {@code sut} connection into a {@code ReadFrom.Nodes}
 * view so selection logic can be exercised against the live cluster topology.
 * <p>
 * NOTE(review): the raw {@code (List)} cast assumes the elements returned by
 * {@code sut.getPartitions()} implement {@code RedisNodeDescription} — confirm
 * against the Partitions type; an unchecked-cast warning is expected here.
 */
private ReadFrom.Nodes getNodes() {
    return new ReadFrom.Nodes() {
        // Re-read the partitions on every call so the view tracks topology changes.
        @Override
        public List<RedisNodeDescription> getNodes() {
            return (List) sut.getPartitions();
        }
        // Iterate over a freshly obtained node list.
        @Override
        public Iterator<RedisNodeDescription> iterator() {
            return getNodes().iterator();
        }
    };
}
/**
 * Wraps a fixed set of node descriptions in a {@code ReadFrom.Nodes} view for
 * driving {@code ReadFrom.select(...)} in tests.
 */
private ReadFrom.Nodes getNodes(RedisNodeDescription... descriptions) {
    return new ReadFrom.Nodes() {
        // A fixed-size List view over the supplied descriptions, rebuilt per call.
        @Override
        public List<RedisNodeDescription> getNodes() {
            return Arrays.asList(descriptions);
        }

        @Override
        public Iterator<RedisNodeDescription> iterator() {
            return getNodes().iterator();
        }
    };
}
}
| ReadFromUnitTests |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/operations/utils/FunctionTableFactory.java | {
"start": 1516,
"end": 1604
} | class ____ creating a valid {@link FunctionQueryOperation} operation. */
@Internal
final | for |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/result/view/View.java | {
"start": 1651,
"end": 3124
} | interface ____ {
/**
* The name of the exchange attribute that contains the
* {@link org.springframework.web.reactive.BindingContext BindingContext}
* for the request which can be used to create
* {@link org.springframework.validation.BindingResult BindingResult}
* instances for objects in to the model.
* <p>Note: This attribute is not required and may not be present.
* @since 5.1.8
*/
String BINDING_CONTEXT_ATTRIBUTE = View.class.getName() + ".bindingContext";
/**
* Return the list of media types this View supports, or an empty list.
*/
default List<MediaType> getSupportedMediaTypes() {
return Collections.emptyList();
}
/**
* Whether this View does render by performing a redirect.
*/
default boolean isRedirectView() {
return false;
}
/**
* Render the view based on the given {@link HandlerResult}. Implementations
* can access and use the model or only a specific attribute in it.
* @param model a Map with name Strings as keys and corresponding model
* objects as values (Map can also be {@code null} in case of empty model)
* @param contentType the content type selected to render with which should
* match one of the {@link #getSupportedMediaTypes() supported media types}.
* @param exchange the current exchange
* @return {@code Mono} to represent when and if rendering succeeds
*/
Mono<Void> render(@Nullable Map<String, ?> model, @Nullable MediaType contentType, ServerWebExchange exchange);
}
| View |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/AlwaysThrowsTest.java | {
"start": 6264,
"end": 6936
} | class ____ {
ImmutableBiMap<String, Integer> map(String s) {
// BUG: Diagnostic contains:
return ImmutableBiMap.of(s, 1, s, 2);
}
ImmutableBiMap<Integer, String> values(String s) {
// BUG: Diagnostic contains:
return ImmutableBiMap.of(1, s, 2, s);
}
}
""")
.doTest();
}
@Test
public void immutableMapWithComplexKeys() {
testHelper
.addSourceLines(
"Test.java",
"""
import com.google.common.collect.ImmutableMap;
import java.time.Duration;
| Test |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/RequestResponseBodyMethodProcessorTests.java | {
"start": 56876,
"end": 56982
} | interface ____<A> {
default A handle(@RequestBody A arg) {
return arg;
}
}
static | MappingInterface |
java | apache__kafka | core/src/main/java/kafka/server/ReplicationQuotaManager.java | {
"start": 1723,
"end": 5814
} | class ____ implements ReplicaQuota {
public static final List<Integer> ALL_REPLICAS = List.of(-1);
private static final Logger LOGGER = LoggerFactory.getLogger(ReplicationQuotaManager.class);
private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
private final ConcurrentHashMap<String, List<Integer>> throttledPartitions = new ConcurrentHashMap<>();
private final ReplicationQuotaManagerConfig config;
private final Metrics metrics;
private final QuotaType replicationType;
private final Time time;
private final SensorAccess sensorAccess;
private final MetricName rateMetricName;
private Quota quota;
public ReplicationQuotaManager(ReplicationQuotaManagerConfig config, Metrics metrics, QuotaType replicationType, Time time) {
this.config = config;
this.metrics = metrics;
this.replicationType = replicationType;
this.time = time;
this.sensorAccess = new SensorAccess(lock, metrics);
this.rateMetricName = metrics.metricName("byte-rate", replicationType.toString(), "Tracking byte-rate for " + replicationType);
}
/**
* Update the quota
*/
public void updateQuota(Quota quota) {
lock.writeLock().lock();
try {
this.quota = quota;
KafkaMetric metric = metrics.metrics().get(rateMetricName);
if (metric != null) {
metric.config(getQuotaMetricConfig(quota));
}
} finally {
lock.writeLock().unlock();
}
}
/**
* Check if the quota is currently exceeded
*/
@Override
public boolean isQuotaExceeded() {
try {
sensor().checkQuotas();
return false;
} catch (QuotaViolationException qve) {
LOGGER.trace("{}: Quota violated for sensor ({}), metric: ({}), metric-value: ({}), bound: ({})",
replicationType, sensor().name(), qve.metric().metricName(), qve.value(), qve.bound());
return true;
}
}
/**
* Is the passed partition throttled by this ReplicationQuotaManager
*/
@Override
public boolean isThrottled(TopicPartition topicPartition) {
List<Integer> partitions = throttledPartitions.get(topicPartition.topic());
return partitions != null && (partitions.equals(ALL_REPLICAS) || partitions.contains(topicPartition.partition()));
}
/**
* Add the passed value to the throttled rate. This method ignores the quota with
* the value being added to the rate even if the quota is exceeded
*/
@Override
public void record(long value) {
sensor().record((double) value, time.milliseconds(), false);
}
/**
* Update the set of throttled partitions for this QuotaManager. The partitions passed, for
* any single topic, will replace any previous
*/
public void markThrottled(String topic, List<Integer> partitions) {
throttledPartitions.put(topic, partitions);
}
/**
* Mark all replicas for this topic as throttled
*/
public void markThrottled(String topic) {
markThrottled(topic, ALL_REPLICAS);
}
public void removeThrottle(String topic) {
throttledPartitions.remove(topic);
}
public long upperBound() {
lock.readLock().lock();
try {
return quota != null ? (long) quota.bound() : Long.MAX_VALUE;
} finally {
lock.readLock().unlock();
}
}
private MetricConfig getQuotaMetricConfig(Quota quota) {
return new MetricConfig()
.timeWindow(config.quotaWindowSizeSeconds, TimeUnit.SECONDS)
.samples(config.numQuotaSamples)
.quota(quota);
}
private Sensor sensor() {
return sensorAccess.getOrCreate(
replicationType.toString(),
ReplicationQuotaManagerConfig.INACTIVE_SENSOR_EXPIRATION_TIME_SECONDS,
sensor -> sensor.add(rateMetricName, new SimpleRate(), getQuotaMetricConfig(quota))
);
}
}
| ReplicationQuotaManager |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.