language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/TypeEqualsCheckerTest.java
|
{
"start": 874,
"end": 1753
}
|
class ____ {
private final CompilationTestHelper testHelper =
CompilationTestHelper.newInstance(TypeEqualsChecker.class, getClass());
@Test
public void noMatch() {
testHelper
.addSourceLines(
"ExampleChecker.java",
"""
import com.google.errorprone.BugPattern;
import com.google.errorprone.BugPattern.SeverityLevel;
import com.google.errorprone.VisitorState;
import com.google.errorprone.bugpatterns.BugChecker;
import com.google.errorprone.bugpatterns.BugChecker.ClassTreeMatcher;
import com.google.errorprone.matchers.Description;
import com.sun.source.tree.ClassTree;
import com.sun.tools.javac.code.Types;
@BugPattern(name = "Example", summary = "", severity = SeverityLevel.ERROR)
public
|
TypeEqualsCheckerTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/size/CriteriaSelectSizeCollectionTest.java
|
{
"start": 2431,
"end": 3067
}
|
class ____ {
@Id
private String id;
private String name;
@ManyToMany(cascade = CascadeType.ALL)
private Collection<Alias> aliases = new ArrayList<>();
public Customer() {
}
public Customer(String id, String name) {
this.id = id;
this.name = name;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public void addAlias(Alias alias) {
aliases.add( alias );
}
}
@Entity(name = "Alias")
@Table(name = "ALIAS_TABLE")
public static
|
Customer
|
java
|
grpc__grpc-java
|
stub/src/test/java/io/grpc/stub/ClientCallsTest.java
|
{
"start": 32934,
"end": 34392
}
|
class ____ implements ServerStreamingMethod<Integer, Integer> {
ServerCallStreamObserver<Integer> observer;
@Override public void invoke(Integer request, StreamObserver<Integer> responseObserver) {
observer = (ServerCallStreamObserver<Integer>) responseObserver;
}
}
NoopServerStreamingMethod methodImpl = new NoopServerStreamingMethod();
server = InProcessServerBuilder.forName("noop").directExecutor()
.addService(ServerServiceDefinition.builder("some")
.addMethod(SERVER_STREAMING_METHOD, ServerCalls.asyncServerStreamingCall(methodImpl))
.build())
.build().start();
InterruptInterceptor interceptor = new InterruptInterceptor();
channel = InProcessChannelBuilder.forName("noop")
.directExecutor()
.intercept(interceptor)
.build();
Iterator<Integer> iter = ClientCalls.blockingServerStreamingCall(
channel, SERVER_STREAMING_METHOD, CallOptions.DEFAULT, req);
try {
iter.next();
fail();
} catch (StatusRuntimeException ex) {
assertTrue(Thread.interrupted());
assertTrue("interrupted", ex.getCause() instanceof InterruptedException);
}
assertTrue("onCloseCalled", interceptor.onCloseCalled);
assertTrue("context not cancelled", methodImpl.observer.isCancelled());
}
// Used for blocking tests to check interrupt behavior and make sure onClose is still called.
|
NoopServerStreamingMethod
|
java
|
elastic__elasticsearch
|
modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java
|
{
"start": 922,
"end": 4674
}
|
class ____ extends AbstractTokenizerFactory {
static final String MAX_TOKEN_LENGTH = "max_token_length";
private final Set<Integer> tokenizeOnChars = new HashSet<>();
private final Integer maxTokenLength;
private boolean tokenizeOnSpace = false;
private boolean tokenizeOnLetter = false;
private boolean tokenizeOnDigit = false;
private boolean tokenizeOnPunctuation = false;
private boolean tokenizeOnSymbol = false;
public CharGroupTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(name);
maxTokenLength = settings.getAsInt(MAX_TOKEN_LENGTH, CharTokenizer.DEFAULT_MAX_WORD_LEN);
for (final String c : settings.getAsList("tokenize_on_chars")) {
if (c == null || c.length() == 0) {
throw new RuntimeException("[tokenize_on_chars] cannot contain empty characters");
}
if (c.length() == 1) {
tokenizeOnChars.add((int) c.charAt(0));
} else if (c.charAt(0) == '\\') {
tokenizeOnChars.add((int) parseEscapedChar(c));
} else {
switch (c) {
case "letter" -> tokenizeOnLetter = true;
case "digit" -> tokenizeOnDigit = true;
case "whitespace" -> tokenizeOnSpace = true;
case "punctuation" -> tokenizeOnPunctuation = true;
case "symbol" -> tokenizeOnSymbol = true;
default -> throw new RuntimeException("Invalid escaped char in [" + c + "]");
}
}
}
}
private static char parseEscapedChar(final String s) {
int len = s.length();
char c = s.charAt(0);
if (c == '\\') {
if (1 >= len) throw new RuntimeException("Invalid escaped char in [" + s + "]");
c = s.charAt(1);
switch (c) {
case '\\':
return '\\';
case 'n':
return '\n';
case 't':
return '\t';
case 'r':
return '\r';
case 'b':
return '\b';
case 'f':
return '\f';
case 'u':
if (len > 6) {
throw new RuntimeException("Invalid escaped char in [" + s + "]");
}
return (char) Integer.parseInt(s.substring(2), 16);
default:
throw new RuntimeException("Invalid escaped char " + c + " in [" + s + "]");
}
} else {
throw new RuntimeException("Invalid escaped char [" + s + "]");
}
}
@Override
public Tokenizer create() {
return new CharTokenizer(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, maxTokenLength) {
@Override
protected boolean isTokenChar(int c) {
if (tokenizeOnSpace && Character.isWhitespace(c)) {
return false;
}
if (tokenizeOnLetter && Character.isLetter(c)) {
return false;
}
if (tokenizeOnDigit && Character.isDigit(c)) {
return false;
}
if (tokenizeOnPunctuation && CharMatcher.Basic.PUNCTUATION.isTokenChar(c)) {
return false;
}
if (tokenizeOnSymbol && CharMatcher.Basic.SYMBOL.isTokenChar(c)) {
return false;
}
return tokenizeOnChars.contains(c) == false;
}
};
}
}
|
CharGroupTokenizerFactory
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/RecipientListExchangePatternOptionTest.java
|
{
"start": 894,
"end": 1339
}
|
class ____ extends SendExchangePatternOptionTest {
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").recipientList(constant("stub:foo?exchangePattern=InOnly")).to("mock:result");
from("stub:foo").to("mock:stub");
}
};
}
}
|
RecipientListExchangePatternOptionTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java
|
{
"start": 3915,
"end": 30075
}
|
class ____ extends AbstractLifecycleComponent implements HttpServerTransport {
private static final Logger logger = LogManager.getLogger(AbstractHttpServerTransport.class);
protected final Settings settings;
public final HttpHandlingSettings handlingSettings;
protected final NetworkService networkService;
protected final Recycler<BytesRef> recycler;
protected final ThreadPool threadPool;
protected final Dispatcher dispatcher;
protected final CorsHandler corsHandler;
private final XContentParserConfiguration parserConfig;
protected final PortsRange port;
protected final ByteSizeValue maxContentLength;
private final String[] bindHosts;
private final String[] publishHosts;
private volatile BoundTransportAddress boundAddress;
private final AtomicLong totalChannelsAccepted = new AtomicLong();
private final Map<HttpChannel, RequestTrackingHttpChannel> httpChannels = new ConcurrentHashMap<>();
private final PlainActionFuture<Void> allClientsClosedListener = new PlainActionFuture<>();
private final RefCounted refCounted = AbstractRefCounted.of(() -> allClientsClosedListener.onResponse(null));
private final Set<HttpServerChannel> httpServerChannels = ConcurrentCollections.newConcurrentSet();
private final long shutdownGracePeriodMillis;
private final long shutdownPollPeriodMillis;
private final HttpClientStatsTracker httpClientStatsTracker;
private final HttpTracer httpLogger;
private final Tracer tracer;
private final MeterRegistry meterRegistry;
private final List<AutoCloseable> metricsToClose = new ArrayList<>(2);
private volatile boolean shuttingDown;
private final ReadWriteLock shuttingDownRWLock = new StampedLock().asReadWriteLock();
private volatile long slowLogThresholdMs;
protected AbstractHttpServerTransport(
Settings settings,
NetworkService networkService,
Recycler<BytesRef> recycler,
ThreadPool threadPool,
NamedXContentRegistry xContentRegistry,
Dispatcher dispatcher,
ClusterSettings clusterSettings,
TelemetryProvider telemetryProvider
) {
this.settings = settings;
this.networkService = networkService;
this.recycler = recycler;
this.threadPool = threadPool;
this.parserConfig = XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry)
.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE);
this.dispatcher = dispatcher;
this.handlingSettings = HttpHandlingSettings.fromSettings(settings);
this.corsHandler = CorsHandler.fromSettings(settings);
// we can't make the network.bind_host a fallback since we already fall back to http.host hence the extra conditional here
List<String> httpBindHost = SETTING_HTTP_BIND_HOST.get(settings);
this.bindHosts = (httpBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : httpBindHost).toArray(
Strings.EMPTY_ARRAY
);
// we can't make the network.publish_host a fallback since we already fall back to http.host hence the extra conditional here
List<String> httpPublishHost = SETTING_HTTP_PUBLISH_HOST.get(settings);
this.publishHosts = (httpPublishHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : httpPublishHost)
.toArray(Strings.EMPTY_ARRAY);
this.port = SETTING_HTTP_PORT.get(settings);
this.maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);
this.tracer = telemetryProvider.getTracer();
this.meterRegistry = telemetryProvider.getMeterRegistry();
this.httpLogger = new HttpTracer(settings, clusterSettings);
clusterSettings.addSettingsUpdateConsumer(
TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING,
slowLogThreshold -> this.slowLogThresholdMs = slowLogThreshold.getMillis()
);
slowLogThresholdMs = TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING.get(settings).getMillis();
httpClientStatsTracker = new HttpClientStatsTracker(settings, clusterSettings, threadPool);
shutdownGracePeriodMillis = SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD.get(settings).getMillis();
shutdownPollPeriodMillis = SETTING_HTTP_SERVER_SHUTDOWN_POLL_PERIOD.get(settings).getMillis();
}
public Recycler<BytesRef> recycler() {
return recycler;
}
@Override
public BoundTransportAddress boundAddress() {
return this.boundAddress;
}
@Override
public HttpInfo info() {
BoundTransportAddress boundTransportAddress = boundAddress();
if (boundTransportAddress == null) {
return null;
}
return new HttpInfo(boundTransportAddress, maxContentLength.getBytes());
}
@Override
public HttpStats stats() {
return new HttpStats(
httpChannels.size(),
totalChannelsAccepted.get(),
httpClientStatsTracker.getClientStats(),
dispatcher.getStats()
);
}
protected void bindServer() {
// Bind and start to accept incoming connections.
final InetAddress[] hostAddresses;
try {
hostAddresses = networkService.resolveBindHostAddresses(bindHosts);
} catch (IOException e) {
throw new BindHttpException("Failed to resolve host [" + Arrays.toString(bindHosts) + "]", e);
}
List<TransportAddress> boundAddresses = new ArrayList<>(hostAddresses.length);
for (InetAddress address : hostAddresses) {
boundAddresses.add(bindAddress(address));
}
final InetAddress publishInetAddress;
try {
publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts);
} catch (Exception e) {
throw new BindTransportException("Failed to resolve publish address", e);
}
final int publishPort = resolvePublishPort(settings, boundAddresses, publishInetAddress);
TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort));
this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress);
logger.info("{}", boundAddress);
}
private TransportAddress bindAddress(final InetAddress hostAddress) {
final AtomicReference<Exception> lastException = new AtomicReference<>();
final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
boolean success = port.iterate(portNumber -> {
try {
synchronized (httpServerChannels) {
HttpServerChannel httpServerChannel = bind(new InetSocketAddress(hostAddress, portNumber));
httpServerChannels.add(httpServerChannel);
boundSocket.set(httpServerChannel.getLocalAddress());
}
} catch (Exception e) {
lastException.set(e);
return false;
}
return true;
});
if (success == false) {
throw new BindHttpException("Failed to bind to " + NetworkAddress.format(hostAddress, port), lastException.get());
}
if (logger.isDebugEnabled()) {
logger.debug("Bound http to address {{}}", NetworkAddress.format(boundSocket.get()));
}
return new TransportAddress(boundSocket.get());
}
protected abstract HttpServerChannel bind(InetSocketAddress hostAddress) throws Exception;
@Override
protected final void doStart() {
metricsToClose.add(
meterRegistry.registerLongAsyncCounter(
"es.http.connections.total",
"total number of inbound HTTP connections accepted",
"count",
() -> new LongWithAttributes(totalChannelsAccepted.get())
)
);
metricsToClose.add(
meterRegistry.registerLongGauge(
"es.http.connections.current",
"number of inbound HTTP connections currently open",
"count",
() -> new LongWithAttributes(httpChannels.size())
)
);
startInternal();
}
protected abstract void startInternal();
/**
* Gracefully shut down. If {@link HttpTransportSettings#SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD} is zero, the default, then
* forcefully close all open connections immediately.
* Serially run through the following steps:
* <ol>
* <li> Stop listening for new HTTP connections, which means no new HttpChannel are added to the {@link #httpChannels} list.
* {@link #serverAcceptedChannel(HttpChannel)} will close any new channels to ensure this is true.
* <li> Close the HttpChannel after a new request completes on all existing channels.
* <li> Close all idle channels.
* <li> If grace period is set, wait for all httpChannels to close via 2 for up to the configured grace period,
* {@link #shutdownGracePeriodMillis}.
* If all connections are closed before the expiration of the grace period, stop waiting early.
* <li> Close all remaining open httpChannels even if requests are in flight.
* </ol>
*/
@Override
protected final void doStop() {
synchronized (httpServerChannels) {
if (httpServerChannels.isEmpty() == false) {
try {
CloseableChannel.closeChannels(new ArrayList<>(httpServerChannels), true);
} catch (Exception e) {
logger.warn("exception while closing channels", e);
} finally {
httpServerChannels.clear();
}
}
}
var wlock = shuttingDownRWLock.writeLock();
try {
wlock.lock();
shuttingDown = true;
refCounted.decRef();
httpChannels.values().forEach(RequestTrackingHttpChannel::setCloseWhenIdle);
} finally {
wlock.unlock();
}
boolean closed = false;
long pollTimeMillis = shutdownPollPeriodMillis;
if (shutdownGracePeriodMillis > 0) {
if (shutdownGracePeriodMillis < pollTimeMillis) {
pollTimeMillis = shutdownGracePeriodMillis;
}
logger.debug(format("waiting [%d]ms for clients to close connections", shutdownGracePeriodMillis));
} else {
logger.debug("waiting indefinitely for clients to close connections");
}
long startPollTimeMillis = System.currentTimeMillis();
do {
try {
FutureUtils.get(allClientsClosedListener, pollTimeMillis, TimeUnit.MILLISECONDS);
closed = true;
} catch (ElasticsearchTimeoutException t) {
logger.info(format("still waiting on %d client connections to close", httpChannels.size()));
if (shutdownGracePeriodMillis > 0) {
long endPollTimeMillis = System.currentTimeMillis();
long remainingGracePeriodMillis = shutdownGracePeriodMillis - (endPollTimeMillis - startPollTimeMillis);
if (remainingGracePeriodMillis <= 0) {
logger.warn(format("timed out while waiting [%d]ms for clients to close connections", shutdownGracePeriodMillis));
break;
} else if (remainingGracePeriodMillis < pollTimeMillis) {
pollTimeMillis = remainingGracePeriodMillis;
}
}
}
} while (closed == false);
if (closed == false) {
try {
CloseableChannel.closeChannels(new ArrayList<>(httpChannels.values()), true);
} catch (Exception e) {
logger.warn("unexpected exception while closing http channels", e);
}
try {
allClientsClosedListener.get();
} catch (Exception e) {
assert false : e;
logger.warn("unexpected exception while waiting for http channels to close", e);
}
}
for (final var metricToClose : metricsToClose) {
try {
metricToClose.close();
} catch (Exception e) {
logger.warn("unexpected exception while closing metric [{}]", metricToClose);
assert false : e;
}
}
stopInternal();
}
boolean isAcceptingConnections() {
return shuttingDown == false;
}
@Override
protected void doClose() {}
/**
* Called to tear down internal resources
*/
protected abstract void stopInternal();
// package private for tests
static int resolvePublishPort(Settings settings, List<TransportAddress> boundAddresses, InetAddress publishInetAddress) {
int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings);
if (publishPort < 0) {
for (TransportAddress boundAddress : boundAddresses) {
InetAddress boundInetAddress = boundAddress.address().getAddress();
if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) {
publishPort = boundAddress.getPort();
break;
}
}
}
// if no matching boundAddress found, check if there is a unique port for all bound addresses
if (publishPort < 0) {
final Set<Integer> ports = new HashSet<>();
for (TransportAddress boundAddress : boundAddresses) {
ports.add(boundAddress.getPort());
}
if (ports.size() == 1) {
publishPort = ports.iterator().next();
}
}
if (publishPort < 0) {
throw new BindHttpException(
"Failed to auto-resolve http publish port, multiple bound addresses "
+ boundAddresses
+ " with distinct ports and none of them matched the publish address ("
+ publishInetAddress
+ "). "
+ "Please specify a unique port by setting "
+ SETTING_HTTP_PORT.getKey()
+ " or "
+ SETTING_HTTP_PUBLISH_PORT.getKey()
);
}
return publishPort;
}
public void onException(HttpChannel channel, Exception e) {
try {
if (lifecycle.started() == false) {
// just close and ignore - we are already stopped and just need to make sure we release all resources
return;
}
if (NetworkExceptionHelper.getCloseConnectionExceptionLevel(e, false) != Level.OFF) {
logger.trace(
() -> format("close connection exception caught while handling client http traffic, closing connection %s", channel),
e
);
} else if (NetworkExceptionHelper.isConnectException(e)) {
logger.trace(
() -> format("connect exception caught while handling client http traffic, closing connection %s", channel),
e
);
} else if (e instanceof HttpReadTimeoutException) {
logger.trace(() -> format("http read timeout, closing connection %s", channel), e);
} else if (e instanceof CancelledKeyException) {
logger.trace(
() -> format("cancelled key exception caught while handling client http traffic, closing connection %s", channel),
e
);
} else {
logger.warn(() -> format("caught exception while handling client http traffic, closing connection %s", channel), e);
}
} finally {
CloseableChannel.closeChannel(channel);
}
}
protected static void onServerException(HttpServerChannel channel, Exception e) {
logger.error(() -> "exception from http server channel caught on transport layer [channel=" + channel + "]", e);
}
protected void serverAcceptedChannel(HttpChannel httpChannel) {
var rlock = shuttingDownRWLock.readLock();
try {
rlock.lock();
if (shuttingDown) {
throw new IllegalStateException("Server cannot accept new channel while shutting down");
}
RequestTrackingHttpChannel trackingChannel = httpChannels.putIfAbsent(httpChannel, new RequestTrackingHttpChannel(httpChannel));
assert trackingChannel == null : "Channel should only be added to http channel set once";
} finally {
rlock.unlock();
}
refCounted.incRef();
httpChannel.addCloseListener(ActionListener.running(() -> {
httpChannels.remove(httpChannel);
refCounted.decRef();
}));
totalChannelsAccepted.incrementAndGet();
httpClientStatsTracker.addClientStats(httpChannel);
logger.trace(() -> format("Http channel accepted: %s", httpChannel));
}
/**
* This method handles an incoming http request.
*
* @param httpRequest that is incoming
* @param httpChannel that received the http request
*/
public void incomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel) {
httpClientStatsTracker.updateClientStats(httpRequest, httpChannel);
final RequestTrackingHttpChannel trackingChannel = httpChannels.get(httpChannel);
final long startTime = threadPool.rawRelativeTimeInMillis();
try {
// The channel may not be present if the close listener (set in serverAcceptedChannel) runs before this method because the
// connection closed early
if (trackingChannel == null) {
httpRequest.release();
logger.warn(
"http channel [{}] closed before starting to handle [{}][{}][{}]",
httpChannel,
httpRequest.header(Task.X_OPAQUE_ID_HTTP_HEADER),
httpRequest.method(),
httpRequest.uri()
);
return;
}
trackingChannel.incomingRequest();
handleIncomingRequest(httpRequest, trackingChannel, httpRequest.getInboundException());
} finally {
final long took = threadPool.rawRelativeTimeInMillis() - startTime;
networkService.getHandlingTimeTracker().addObservation(took);
final long logThreshold = slowLogThresholdMs;
if (logThreshold > 0 && took > logThreshold) {
logger.warn(
"handling request [{}][{}][{}][{}] took [{}ms] which is above the warn threshold of [{}ms]",
httpRequest.header(Task.X_OPAQUE_ID_HTTP_HEADER),
httpRequest.method(),
httpRequest.uri(),
httpChannel,
took,
logThreshold
);
}
}
}
// Visible for testing
void dispatchRequest(final RestRequest restRequest, final RestChannel channel, final Throwable badRequestCause) {
final ThreadContext threadContext = threadPool.getThreadContext();
try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
if (badRequestCause != null) {
dispatcher.dispatchBadRequest(channel, threadContext, badRequestCause);
} else {
try {
populatePerRequestThreadContext(restRequest, threadContext);
} catch (Exception e) {
try {
dispatcher.dispatchBadRequest(channel, threadContext, e);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.error(() -> "failed to send failure response for uri [" + restRequest.uri() + "]", inner);
}
return;
}
dispatcher.dispatchRequest(restRequest, channel, threadContext);
}
}
}
protected void populatePerRequestThreadContext(RestRequest restRequest, ThreadContext threadContext) {}
private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel, final Exception exception) {
if (exception == null) {
HttpResponse earlyResponse = corsHandler.handleInbound(httpRequest);
if (earlyResponse != null) {
httpChannel.sendResponse(earlyResponse, earlyResponseListener(httpRequest, httpChannel));
httpRequest.release();
return;
}
}
Exception badRequestCause = exception;
/*
* We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there
* are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we
* attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header,
* or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the
* underlying exception that caused us to treat the request as bad.
*/
final RestRequest restRequest;
{
RestRequest innerRestRequest;
try {
innerRestRequest = RestRequest.request(parserConfig, httpRequest, httpChannel);
} catch (final RestRequest.MediaTypeHeaderException e) {
badRequestCause = ExceptionsHelper.useOrSuppress(badRequestCause, e);
innerRestRequest = requestWithoutFailedHeader(httpRequest, httpChannel, badRequestCause, e.getFailedHeaderNames());
} catch (final RestRequest.BadParameterException e) {
badRequestCause = ExceptionsHelper.useOrSuppress(badRequestCause, e);
innerRestRequest = RestRequest.requestWithoutParameters(parserConfig, httpRequest, httpChannel);
}
restRequest = innerRestRequest;
}
final HttpTracer maybeHttpLogger = httpLogger.maybeLogRequest(restRequest, exception);
/*
* We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid
* parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an
* IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of these
* parameter values.
*/
final RestChannel channel;
{
RestChannel innerChannel;
ThreadContext threadContext = threadPool.getThreadContext();
try {
innerChannel = new DefaultRestChannel(
httpChannel,
httpRequest,
restRequest,
recycler,
handlingSettings,
threadContext,
corsHandler,
maybeHttpLogger,
tracer
);
} catch (final IllegalArgumentException e) {
badRequestCause = ExceptionsHelper.useOrSuppress(badRequestCause, e);
final RestRequest innerRequest = RestRequest.requestWithoutParameters(parserConfig, httpRequest, httpChannel);
innerChannel = new DefaultRestChannel(
httpChannel,
httpRequest,
innerRequest,
recycler,
handlingSettings,
threadContext,
corsHandler,
httpLogger,
tracer
);
}
channel = innerChannel;
}
dispatchRequest(restRequest, channel, badRequestCause);
}
private RestRequest requestWithoutFailedHeader(
HttpRequest httpRequest,
HttpChannel httpChannel,
Exception badRequestCause,
Set<String> failedHeaderNames
) {
assert failedHeaderNames.size() > 0;
HttpRequest httpRequestWithoutHeader = httpRequest;
for (String failedHeaderName : failedHeaderNames) {
httpRequestWithoutHeader = httpRequestWithoutHeader.removeHeader(failedHeaderName);
}
try {
return RestRequest.request(parserConfig, httpRequestWithoutHeader, httpChannel);
} catch (final RestRequest.MediaTypeHeaderException e) {
badRequestCause = ExceptionsHelper.useOrSuppress(badRequestCause, e);
return requestWithoutFailedHeader(httpRequestWithoutHeader, httpChannel, badRequestCause, e.getFailedHeaderNames());
} catch (final RestRequest.BadParameterException e) {
badRequestCause.addSuppressed(e);
return RestRequest.requestWithoutParameters(parserConfig, httpRequestWithoutHeader, httpChannel);
}
}
private static ActionListener<Void> earlyResponseListener(HttpRequest request, HttpChannel httpChannel) {
if (HttpUtils.shouldCloseConnection(request)) {
return ActionListener.running(() -> CloseableChannel.closeChannel(httpChannel));
} else {
return ActionListener.noop();
}
}
public ThreadPool getThreadPool() {
return threadPool;
}
/**
* A {@link HttpChannel} that tracks the number of in-flight requests via a {@link RefCounted}, allowing the channel to be put into a
* state where it will close when idle.
*/
private static
|
AbstractHttpServerTransport
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/suite/engine/SuiteLauncherDiscoveryRequestBuilderTests.java
|
{
"start": 7608,
"end": 7675
}
|
class ____ {
}
@IncludeClassNamePatterns("^.*TestCase$")
|
TestCase
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/ElementsSubscribeService.java
|
{
"start": 1088,
"end": 4437
}
|
class ____ {
private static final Logger log = LoggerFactory.getLogger(ElementsSubscribeService.class);
private final Map<Integer, CompletableFuture<?>> subscribeListeners = new ConcurrentHashMap<>();
private final ServiceManager serviceManager;
public ElementsSubscribeService(ServiceManager serviceManager) {
this.serviceManager = serviceManager;
}
public <V> int subscribeOnElements(Supplier<CompletionStage<V>> func, Function<V, CompletionStage<Void>> consumer) {
int id = System.identityHashCode(consumer);
CompletableFuture<?> currentFuture = subscribeListeners.putIfAbsent(id, CompletableFuture.completedFuture(null));
if (currentFuture != null) {
throw new IllegalArgumentException("Consumer object with listener id " + id + " already registered");
}
resubscribe(func, consumer);
return id;
}
@Deprecated
public <V> int subscribeOnElements(Supplier<CompletionStage<V>> func, Consumer<V> consumer) {
int id = System.identityHashCode(consumer);
CompletableFuture<?> currentFuture = subscribeListeners.putIfAbsent(id, CompletableFuture.completedFuture(null));
if (currentFuture != null) {
throw new IllegalArgumentException("Consumer object with listener id " + id + " already registered");
}
resubscribe(func, consumer);
return id;
}
public void unsubscribe(int listenerId) {
CompletableFuture<?> f = subscribeListeners.remove(listenerId);
if (f != null) {
f.cancel(false);
}
}
@Deprecated
private <V> void resubscribe(Supplier<CompletionStage<V>> func, Consumer<V> consumer) {
int listenerId = System.identityHashCode(consumer);
CompletionStage<V> f = (CompletionStage<V>) subscribeListeners.computeIfPresent(listenerId, (k, v) -> {
return func.get().toCompletableFuture();
});
if (f == null) {
return;
}
f.whenComplete((r, e) -> {
if (e != null) {
if (serviceManager.isShuttingDown(e)) {
return;
}
serviceManager.newTimeout(t -> {
resubscribe(func, consumer);
}, 1, TimeUnit.SECONDS);
return;
}
consumer.accept(r);
resubscribe(func, consumer);
});
}
private <V> void resubscribe(Supplier<CompletionStage<V>> func, Function<V, CompletionStage<Void>> consumer) {
int listenerId = System.identityHashCode(consumer);
CompletionStage<V> f = (CompletionStage<V>) subscribeListeners.computeIfPresent(listenerId, (k, v) -> {
return func.get().toCompletableFuture();
});
if (f == null) {
return;
}
f.thenCompose(consumer).whenComplete((r, ex) -> {
if (ex != null) {
if (serviceManager.isShuttingDown(ex)) {
return;
}
log.error(ex.getMessage(), ex);
serviceManager.newTimeout(t -> {
resubscribe(func, consumer);
}, 1, TimeUnit.SECONDS);
return;
}
resubscribe(func, consumer);
});
}
}
|
ElementsSubscribeService
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java
|
{
"start": 9964,
"end": 10704
}
|
class ____ extends TransportResponseHandler.Empty {
private final Runnable onException;
ChildResponseHandler(Runnable onException) {
this.onException = onException;
}
@Override
public Executor executor() {
return TransportResponseHandler.TRANSPORT_WORKER;
}
@Override
public void handleResponse() {
fail("should not get successful response");
}
@Override
public void handleException(TransportException exp) {
assertThat(exp.unwrapCause(), anyOf(instanceOf(TaskCancelledException.class), instanceOf(NodeDisconnectedException.class)));
onException.run();
}
}
}
|
ChildResponseHandler
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNOperator.java
|
{
"start": 5673,
"end": 7334
}
|
class ____ implements Releasable, Accountable {
private static final long BASE_RAM_USAGE = RamUsageEstimator.shallowSizeOfInstance(BytesOrder.class);
private final CircuitBreaker breaker;
final List<SortOrder> sortOrders;
final int[] endOffsets;
BytesOrder(List<SortOrder> sortOrders, CircuitBreaker breaker, String label) {
this.breaker = breaker;
this.sortOrders = sortOrders;
breaker.addEstimateBytesAndMaybeBreak(memoryUsed(sortOrders.size()), label);
this.endOffsets = new int[sortOrders.size()];
}
/**
* Returns true if the byte at the given position is ordered ascending; otherwise, return false
*/
boolean isByteOrderAscending(int bytePosition) {
int index = Arrays.binarySearch(endOffsets, bytePosition);
if (index < 0) {
index = -1 - index;
}
return sortOrders.get(index).asc();
}
private long memoryUsed(int numKeys) {
// sortOrders is global and its memory is accounted at the top level TopNOperator
return BASE_RAM_USAGE + RamUsageEstimator.alignObjectSize(
(long) RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Integer.BYTES * numKeys
);
}
@Override
public long ramBytesUsed() {
return memoryUsed(sortOrders.size());
}
@Override
public void close() {
breaker.addWithoutBreaking(-ramBytesUsed());
}
}
record KeyFactory(KeyExtractor extractor, boolean ascending) {}
static final
|
BytesOrder
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableWindowBoundary.java
|
{
"start": 1874,
"end": 7757
}
|
class ____<T, B>
extends AtomicInteger
implements Observer<T>, Disposable, Runnable {
private static final long serialVersionUID = 2233020065421370272L;
final Observer<? super Observable<T>> downstream;
final int capacityHint;
final WindowBoundaryInnerObserver<T, B> boundaryObserver;
final AtomicReference<Disposable> upstream;
final AtomicInteger windows;
final MpscLinkedQueue<Object> queue;
final AtomicThrowable errors;
final AtomicBoolean stopWindows;
static final Object NEXT_WINDOW = new Object();
volatile boolean done;
UnicastSubject<T> window;
WindowBoundaryMainObserver(Observer<? super Observable<T>> downstream, int capacityHint) {
this.downstream = downstream;
this.capacityHint = capacityHint;
this.boundaryObserver = new WindowBoundaryInnerObserver<>(this);
this.upstream = new AtomicReference<>();
this.windows = new AtomicInteger(1);
this.queue = new MpscLinkedQueue<>();
this.errors = new AtomicThrowable();
this.stopWindows = new AtomicBoolean();
}
@Override
public void onSubscribe(Disposable d) {
if (DisposableHelper.setOnce(upstream, d)) {
innerNext();
}
}
@Override
public void onNext(T t) {
queue.offer(t);
drain();
}
@Override
public void onError(Throwable e) {
boundaryObserver.dispose();
if (errors.tryAddThrowableOrReport(e)) {
done = true;
drain();
}
}
@Override
public void onComplete() {
boundaryObserver.dispose();
done = true;
drain();
}
@Override
public void dispose() {
if (stopWindows.compareAndSet(false, true)) {
boundaryObserver.dispose();
if (windows.decrementAndGet() == 0) {
DisposableHelper.dispose(upstream);
}
}
}
@Override
public boolean isDisposed() {
return stopWindows.get();
}
@Override
public void run() {
if (windows.decrementAndGet() == 0) {
DisposableHelper.dispose(upstream);
}
}
void innerNext() {
queue.offer(NEXT_WINDOW);
drain();
}
void innerError(Throwable e) {
DisposableHelper.dispose(upstream);
if (errors.tryAddThrowableOrReport(e)) {
done = true;
drain();
}
}
void innerComplete() {
DisposableHelper.dispose(upstream);
done = true;
drain();
}
@SuppressWarnings("unchecked")
void drain() {
if (getAndIncrement() != 0) {
return;
}
int missed = 1;
Observer<? super Observable<T>> downstream = this.downstream;
MpscLinkedQueue<Object> queue = this.queue;
AtomicThrowable errors = this.errors;
for (;;) {
for (;;) {
if (windows.get() == 0) {
queue.clear();
window = null;
return;
}
UnicastSubject<T> w = window;
boolean d = done;
if (d && errors.get() != null) {
queue.clear();
Throwable ex = errors.terminate();
if (w != null) {
window = null;
w.onError(ex);
}
downstream.onError(ex);
return;
}
Object v = queue.poll();
boolean empty = v == null;
if (d && empty) {
Throwable ex = errors.terminate();
if (ex == null) {
if (w != null) {
window = null;
w.onComplete();
}
downstream.onComplete();
} else {
if (w != null) {
window = null;
w.onError(ex);
}
downstream.onError(ex);
}
return;
}
if (empty) {
break;
}
if (v != NEXT_WINDOW) {
w.onNext((T)v);
continue;
}
if (w != null) {
window = null;
w.onComplete();
}
if (!stopWindows.get()) {
w = UnicastSubject.create(capacityHint, this);
window = w;
windows.getAndIncrement();
ObservableWindowSubscribeIntercept<T> intercept = new ObservableWindowSubscribeIntercept<>(w);
downstream.onNext(intercept);
if (intercept.tryAbandon()) {
w.onComplete();
}
}
}
missed = addAndGet(-missed);
if (missed == 0) {
break;
}
}
}
}
static final
|
WindowBoundaryMainObserver
|
java
|
apache__camel
|
components/camel-test/camel-test-main-junit5/src/test/java/org/apache/camel/test/main/junit5/annotation/MockEndpointTest.java
|
{
"start": 4780,
"end": 5061
}
|
class ____ extends RouteBuilder {
@Override
public void configure() throws Exception {
from("direct:start").to("direct:foo").to("log:foo").to("mock:result");
from("direct:foo").transform(constant("Bye World"));
}
}
}
|
MyRouteBuilder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityWithFlsRestIT.java
|
{
"start": 642,
"end": 10790
}
|
class ____ extends AbstractRemoteClusterSecurityDlsAndFlsRestIT {
private static final String REMOTE_CLUSTER_FLS = REMOTE_CLUSTER_ALIAS + "_fls";
private static final AtomicReference<Map<String, Object>> API_KEY_REFERENCE = new AtomicReference<>();
private static final String API_KEY_ACCESS = """
{
"search": [
{
"names": ["remote_index2"],
"field_security": {"grant": [ "field2" ]}
},
{
"names": ["remote_index2"],
"field_security": {"grant": [ "field3" ]}
}
]
}""";
static {
fulfillingCluster = ElasticsearchCluster.local()
.name("fulfilling-cluster")
.apply(commonClusterConfig)
.setting("remote_cluster_server.enabled", "true")
.setting("remote_cluster.port", "0")
.setting("xpack.security.remote_cluster_server.ssl.enabled", "true")
.setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key")
.setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt")
.keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password")
.build();
queryCluster = ElasticsearchCluster.local()
.name("query-cluster")
.apply(commonClusterConfig)
.setting("xpack.security.remote_cluster_client.ssl.enabled", "true")
.setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt")
.keystore(
"cluster.remote." + REMOTE_CLUSTER_FLS + ".credentials",
() -> createCrossClusterAccessApiKey(API_KEY_ACCESS, API_KEY_REFERENCE)
)
.build();
}
@ClassRule
public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster);
public void testCrossClusterSearchWithFls() throws Exception {
setupRemoteClusterTestCase(REMOTE_CLUSTER_FLS);
final Request searchRequest = new Request(
"GET",
Strings.format(
"/%s:%s/_search?ccs_minimize_roundtrips=%s",
REMOTE_CLUSTER_FLS,
randomFrom("remote_index*", "*"),
randomBoolean()
)
);
// Running a CCS request with a user with DLS and FLS should be intersected with cross cluster API key's DLS and FLS permissions.
{
assertSearchResponseContainsExpectedIndicesAndFields(
performRequestWithUser(searchRequest, REMOTE_SEARCH_USER_NO_DLS_FLS),
Map.ofEntries(Map.entry("remote_index2", Set.of("field2", "field3")))
);
// API key with owner's permissions should return the same result.
final String apiKeyNoDlsFls = createRemoteSearchApiKeyWithUser(REMOTE_SEARCH_USER_NO_DLS_FLS, "{}").v2();
assertSearchResponseContainsExpectedIndicesAndFields(
performRequestWithApiKey(searchRequest, apiKeyNoDlsFls),
Map.ofEntries(Map.entry("remote_index2", Set.of("field2", "field3")))
);
// API key's role restrictions should be respected.
String apiKeyNoDlsFlsRestricted = createRemoteSearchApiKeyWithUser(REMOTE_SEARCH_USER_NO_DLS_FLS, """
{
"role1": {
"remote_indices": [
{
"names": ["remote_index3"],
"privileges": ["read", "read_cross_cluster"],
"clusters": ["*"]
}
]
}
}
""").v2();
assertSearchResponseContainsEmptyResult(performRequestWithApiKey(searchRequest, apiKeyNoDlsFlsRestricted));
}
// Running a CCS request with a user with DLS and FLS.
{
assertSearchResponseContainsExpectedIndicesAndFields(
performRequestWithUser(searchRequest, REMOTE_SEARCH_USER_DLS_FLS),
Map.ofEntries(Map.entry("remote_index2", Set.of("field2")))
);
// API key with owner's permissions should return the same result.
final String apiKeyDlsFls = createRemoteSearchApiKeyWithUser(REMOTE_SEARCH_USER_DLS_FLS, "{}").v2();
assertSearchResponseContainsExpectedIndicesAndFields(
performRequestWithApiKey(searchRequest, apiKeyDlsFls),
Map.ofEntries(Map.entry("remote_index2", Set.of("field2")))
);
// API key's role restrictions should be respected.
String apiKeyDlsFlsRestricted = createRemoteSearchApiKeyWithUser(REMOTE_SEARCH_USER_DLS_FLS, """
{
"role1": {
"remote_indices": [
{
"names": ["*"],
"privileges": ["read", "read_cross_cluster"],
"clusters": ["*"],
"query": {"bool": {"must_not": {"term": {"field1": "value4"}}}},
"field_security": {"grant": ["field1"]}
}
]
}
}
""").v2();
assertSearchResponseContainsExpectedIndicesAndFields(
performRequestWithApiKey(searchRequest, apiKeyDlsFlsRestricted),
Map.ofEntries(Map.entry("remote_index2", Set.of()))
);
}
// Running a CCS request with a user with DLS only.
{
assertSearchResponseContainsExpectedIndicesAndFields(
performRequestWithUser(searchRequest, REMOTE_SEARCH_USER_DLS),
Map.ofEntries(Map.entry("remote_index2", Set.of("field2", "field3")))
);
// API key with owner's permissions should return the same search result.
final String apiKeyDls = createRemoteSearchApiKeyWithUser(REMOTE_SEARCH_USER_DLS, "{}").v2();
assertSearchResponseContainsExpectedIndicesAndFields(
performRequestWithApiKey(searchRequest, apiKeyDls),
Map.ofEntries(Map.entry("remote_index2", Set.of("field2", "field3")))
);
// API key's role restrictions should be respected.
String apiKeyDlsRestricted = createRemoteSearchApiKeyWithUser(REMOTE_SEARCH_USER_DLS, """
{
"role1": {
"remote_indices": [
{
"names": ["remote_index*"],
"privileges": ["read", "read_cross_cluster"],
"clusters": ["*"],
"query": {"bool": {"must": {"term": {"field1": "value4"}}}},
"field_security": {"grant": ["field1"]}
}
]
},
"role2": {
"remote_indices": [
{
"names": ["remote_index2", "remote_index3"],
"privileges": ["read", "read_cross_cluster"],
"clusters": ["*"],
"query": {"bool": {"must_not": {"term": {"field1": "value3"}}}},
"field_security": {"grant": ["field2"]}
}
]
}
}
""").v2();
assertSearchResponseContainsExpectedIndicesAndFields(
performRequestWithApiKey(searchRequest, apiKeyDlsRestricted),
Map.ofEntries(Map.entry("remote_index2", Set.of("field2")))
);
}
// Running a CCS request with a user with FLS only.
{
assertSearchResponseContainsExpectedIndicesAndFields(
performRequestWithUser(searchRequest, REMOTE_SEARCH_USER_FLS),
Map.ofEntries(Map.entry("remote_index2", Set.of("field3")))
);
// API key with owner's permissions should return the same result.
final String apiKeyFls = createRemoteSearchApiKeyWithUser(REMOTE_SEARCH_USER_FLS, "{}").v2();
assertSearchResponseContainsExpectedIndicesAndFields(
performRequestWithApiKey(searchRequest, apiKeyFls),
Map.ofEntries(Map.entry("remote_index2", Set.of("field3")))
);
// API key's role restrictions should be respected. In this case no fields should be returned.
String apiKeyFlsRestricted = createRemoteSearchApiKeyWithUser(REMOTE_SEARCH_USER_FLS, """
{
"role1": {
"remote_indices": [
{
"names": ["*"],
"privileges": ["read", "read_cross_cluster"],
"clusters": ["*"],
"query": {"bool": {"must_not": {"term": {"field1": "value4"}}}},
"field_security": {"grant": ["field1"]}
}
]
},
"role2": {
"remote_indices": [
{
"names": ["*2", "*3"],
"privileges": ["read", "read_cross_cluster"],
"clusters": ["*"],
"query": {"bool": {"should": [{"term": {"field1": "value3"}}]}},
"field_security": {"grant": ["field2"]}
}
]
}
}
""").v2();
assertSearchResponseContainsExpectedIndicesAndFields(
performRequestWithApiKey(searchRequest, apiKeyFlsRestricted),
Map.ofEntries(Map.entry("remote_index2", Set.of()))
);
}
}
}
|
RemoteClusterSecurityWithFlsRestIT
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/sqm/tree/expression/NumericTypeCategory.java
|
{
"start": 269,
"end": 1211
}
|
enum ____ {
INTEGER,
LONG,
BIG_INTEGER,
DOUBLE,
FLOAT,
BIG_DECIMAL;
public <N extends Number> N parseLiteralValue(String value) {
switch ( this ) {
case INTEGER: {
//noinspection unchecked
return (N) Integer.valueOf( value );
}
case LONG: {
//noinspection unchecked
return (N) Long.valueOf( value );
}
case BIG_INTEGER: {
//noinspection unchecked
return (N) new BigInteger( value );
}
case DOUBLE: {
//noinspection unchecked
return (N) Double.valueOf( value );
}
case FLOAT: {
//noinspection unchecked
return (N) Float.valueOf( value );
}
case BIG_DECIMAL: {
//noinspection unchecked
return (N) new BigDecimal( value );
}
default: {
throw new IllegalStateException(
String.format(
Locale.ROOT,
"Unable to parse numeric literal value `%s` - %s",
value,
name()
)
);
}
}
}
}
|
NumericTypeCategory
|
java
|
netty__netty
|
codec-socks/src/main/java/io/netty/handler/codec/socksx/AbstractSocksMessage.java
|
{
"start": 826,
"end": 1212
}
|
class ____ implements SocksMessage {
private DecoderResult decoderResult = DecoderResult.SUCCESS;
@Override
public DecoderResult decoderResult() {
return decoderResult;
}
@Override
public void setDecoderResult(DecoderResult decoderResult) {
this.decoderResult = ObjectUtil.checkNotNull(decoderResult, "decoderResult");
}
}
|
AbstractSocksMessage
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/javadoc/InvalidParamTest.java
|
{
"start": 3147,
"end": 3406
}
|
interface ____ {
/**
* @param <S> baz
* @param a bar
*/
<T> void foo(int a);
}
""")
.addOutputLines(
"Test.java",
"""
|
Test
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/type/AnnotatedClassTest.java
|
{
"start": 339,
"end": 553
}
|
class ____
{
/*
/**********************************************************
/* Annotated helper classes
/**********************************************************
*/
static
|
AnnotatedClassTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/type/descriptor/jdbc/LongVarcharJdbcType.java
|
{
"start": 426,
"end": 1798
}
|
class ____ extends VarcharJdbcType {
public static final LongVarcharJdbcType INSTANCE = new LongVarcharJdbcType();
private final int defaultSqlTypeCode;
public LongVarcharJdbcType() {
this( Types.LONGVARCHAR );
}
public LongVarcharJdbcType(int defaultSqlTypeCode) {
this.defaultSqlTypeCode = defaultSqlTypeCode;
}
@Override
public String toString() {
return "LongVarcharTypeDescriptor";
}
@Override
public int getJdbcTypeCode() {
return Types.LONGVARCHAR;
}
@Override
public int getDefaultSqlTypeCode() {
return defaultSqlTypeCode;
}
@Override
public JdbcType resolveIndicatedType(
JdbcTypeIndicators indicators,
JavaType<?> domainJtd) {
assert domainJtd != null;
final var typeConfiguration = indicators.getTypeConfiguration();
final JdbcTypeRegistry jdbcTypeRegistry = typeConfiguration.getJdbcTypeRegistry();
final int jdbcTypeCode;
if ( indicators.isLob() ) {
jdbcTypeCode = indicators.isNationalized() ? Types.NCLOB : Types.CLOB;
}
else if ( shouldUseMaterializedLob( indicators ) ) {
jdbcTypeCode = indicators.isNationalized() ? SqlTypes.MATERIALIZED_NCLOB : SqlTypes.MATERIALIZED_CLOB;
}
else {
jdbcTypeCode = indicators.isNationalized() ? Types.LONGNVARCHAR : Types.LONGVARCHAR;
}
return jdbcTypeRegistry.getDescriptor( indicators.resolveJdbcTypeCode( jdbcTypeCode ) );
}
}
|
LongVarcharJdbcType
|
java
|
square__javapoet
|
src/test/java/com/squareup/javapoet/JavaFileTest.java
|
{
"start": 19927,
"end": 20410
}
|
class ____ {\n"
+ "}\n");
}
@Test public void conflictingTypeVariableBound() throws Exception {
String source = JavaFile.builder("com.squareup.tacos",
TypeSpec.classBuilder("Taco")
.addTypeVariable(
TypeVariableName.get("T", ClassName.get("com.taco.bell", "Taco")))
.build())
.build()
.toString();
assertThat(source).isEqualTo(""
+ "package com.squareup.tacos;\n"
+ "\n"
+ "
|
Taco
|
java
|
quarkusio__quarkus
|
integration-tests/kubernetes/quarkus-standard-way/src/test/java/io/quarkus/it/kubernetes/OpenshiftWithDockerAndImageTest.java
|
{
"start": 769,
"end": 3316
}
|
class ____ {
private static final String APP_NAME = "openshift-with-docker-and-image";
@RegisterExtension
static final QuarkusProdModeTest config = new QuarkusProdModeTest()
.withApplicationRoot((jar) -> jar.addClasses(GreetingResource.class))
.setApplicationName(APP_NAME)
.setApplicationVersion("0.1-SNAPSHOT")
.withConfigurationResource(APP_NAME + ".properties")
.overrideConfigKey("quarkus.openshift.deployment-kind", "deployment-config")
.setForcedDependencies(List.of(Dependency.of("io.quarkus", "quarkus-openshift", Version.getVersion())));
@ProdBuildResults
private ProdModeTestResults prodModeTestResults;
@Test
public void assertGeneratedResources() throws IOException {
Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes");
assertThat(kubernetesDir).isDirectoryContaining(p -> p.getFileName().endsWith("openshift.json"))
.isDirectoryContaining(p -> p.getFileName().endsWith("openshift.yml"));
List<HasMetadata> openshiftList = DeserializationUtil.deserializeAsList(kubernetesDir.resolve("openshift.yml"));
assertThat(openshiftList).filteredOn(h -> "DeploymentConfig".equals(h.getKind())).singleElement().satisfies(h -> {
assertThat(h.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo(APP_NAME);
});
assertThat(h).isInstanceOfSatisfying(DeploymentConfig.class, d -> {
Container container = d.getSpec().getTemplate().getSpec().getContainers().get(0);
assertThat(container.getImage()).isEqualTo("quay.io/user/app:1.0");
DeploymentTriggerImageChangeParams imageTriggerParams = d.getSpec().getTriggers().get(0).getImageChangeParams();
assertThat(imageTriggerParams.getFrom().getKind()).isEqualTo("ImageStreamTag");
assertThat(imageTriggerParams.getFrom().getName()).isEqualTo(APP_NAME + ":1.0");
});
});
assertThat(openshiftList).filteredOn(h -> "ImageStream".equals(h.getKind())).singleElement().satisfies(h -> {
assertThat(h.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo(APP_NAME);
});
assertThat(h).isInstanceOfSatisfying(ImageStream.class, i -> {
assertThat(i.getSpec().getDockerImageRepository()).isEqualTo("quay.io/user/app");
});
});
}
}
|
OpenshiftWithDockerAndImageTest
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java
|
{
"start": 1933,
"end": 16853
}
|
class ____ extends ESIntegTestCase {
public void testExternalVersioningInitialDelete() throws Exception {
createIndex("test");
ensureGreen();
// Note - external version doesn't throw version conflicts on deletes of non existent records.
// This is different from internal versioning
DeleteResponse deleteResponse = client().prepareDelete("test", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).get();
assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult());
// this should conflict with the delete command transaction which told us that the object was deleted at version 17.
assertFutureThrows(
prepareIndex("test").setId("1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(),
VersionConflictEngineException.class
);
DocWriteResponse indexResponse = prepareIndex("test").setId("1")
.setSource("field1", "value1_1")
.setVersion(18)
.setVersionType(VersionType.EXTERNAL)
.get();
assertThat(indexResponse.getVersion(), equalTo(18L));
}
public void testExternalGTE() throws Exception {
createIndex("test");
DocWriteResponse indexResponse = prepareIndex("test").setId("1")
.setSource("field1", "value1_1")
.setVersion(12)
.setVersionType(VersionType.EXTERNAL_GTE)
.get();
assertThat(indexResponse.getVersion(), equalTo(12L));
indexResponse = prepareIndex("test").setId("1")
.setSource("field1", "value1_2")
.setVersion(12)
.setVersionType(VersionType.EXTERNAL_GTE)
.get();
assertThat(indexResponse.getVersion(), equalTo(12L));
indexResponse = prepareIndex("test").setId("1")
.setSource("field1", "value1_2")
.setVersion(14)
.setVersionType(VersionType.EXTERNAL_GTE)
.get();
assertThat(indexResponse.getVersion(), equalTo(14L));
RequestBuilder<?, ?> builder1 = prepareIndex("test").setId("1")
.setSource("field1", "value1_1")
.setVersion(13)
.setVersionType(VersionType.EXTERNAL_GTE);
expectThrows(VersionConflictEngineException.class, builder1);
client().admin().indices().prepareRefresh().get();
if (randomBoolean()) {
refresh();
}
for (int i = 0; i < 10; i++) {
assertThat(client().prepareGet("test", "1").get().getVersion(), equalTo(14L));
}
// deleting with a lower version fails.
RequestBuilder<?, ?> builder = client().prepareDelete("test", "1").setVersion(2).setVersionType(VersionType.EXTERNAL_GTE);
expectThrows(VersionConflictEngineException.class, builder);
// Delete with a higher or equal version deletes all versions up to the given one.
long v = randomIntBetween(14, 17);
DeleteResponse deleteResponse = client().prepareDelete("test", "1").setVersion(v).setVersionType(VersionType.EXTERNAL_GTE).get();
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
assertThat(deleteResponse.getVersion(), equalTo(v));
// Deleting with a lower version keeps on failing after a delete.
assertFutureThrows(
client().prepareDelete("test", "1").setVersion(2).setVersionType(VersionType.EXTERNAL_GTE).execute(),
VersionConflictEngineException.class
);
// But delete with a higher version is OK.
deleteResponse = client().prepareDelete("test", "1").setVersion(18).setVersionType(VersionType.EXTERNAL_GTE).get();
assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult());
assertThat(deleteResponse.getVersion(), equalTo(18L));
}
public void testExternalVersioning() throws Exception {
createIndex("test");
ensureGreen();
DocWriteResponse indexResponse = prepareIndex("test").setId("1")
.setSource("field1", "value1_1")
.setVersion(12)
.setVersionType(VersionType.EXTERNAL)
.get();
assertThat(indexResponse.getVersion(), equalTo(12L));
indexResponse = prepareIndex("test").setId("1")
.setSource("field1", "value1_1")
.setVersion(14)
.setVersionType(VersionType.EXTERNAL)
.get();
assertThat(indexResponse.getVersion(), equalTo(14L));
assertFutureThrows(
prepareIndex("test").setId("1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(),
VersionConflictEngineException.class
);
if (randomBoolean()) {
refresh();
}
for (int i = 0; i < 10; i++) {
assertThat(client().prepareGet("test", "1").get().getVersion(), equalTo(14L));
}
// deleting with a lower version fails.
assertFutureThrows(
client().prepareDelete("test", "1").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(),
VersionConflictEngineException.class
);
// Delete with a higher version deletes all versions up to the given one.
DeleteResponse deleteResponse = client().prepareDelete("test", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).get();
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
assertThat(deleteResponse.getVersion(), equalTo(17L));
// Deleting with a lower version keeps on failing after a delete.
assertFutureThrows(
client().prepareDelete("test", "1").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(),
VersionConflictEngineException.class
);
// But delete with a higher version is OK.
deleteResponse = client().prepareDelete("test", "1").setVersion(18).setVersionType(VersionType.EXTERNAL).get();
assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult());
assertThat(deleteResponse.getVersion(), equalTo(18L));
// TODO: This behavior breaks rest api returning http status 201
// good news is that it this is only the case until deletes GC kicks in.
indexResponse = prepareIndex("test").setId("1")
.setSource("field1", "value1_1")
.setVersion(19)
.setVersionType(VersionType.EXTERNAL)
.get();
assertThat(indexResponse.getVersion(), equalTo(19L));
deleteResponse = client().prepareDelete("test", "1").setVersion(20).setVersionType(VersionType.EXTERNAL).get();
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
assertThat(deleteResponse.getVersion(), equalTo(20L));
// Make sure that the next delete will be GC. Note we do it on the index settings so it will be cleaned up
updateIndexSettings(Settings.builder().put("index.gc_deletes", -1), "test");
Thread.sleep(300); // gc works based on estimated sampled time. Give it a chance...
// And now we have previous version return -1
indexResponse = prepareIndex("test").setId("1")
.setSource("field1", "value1_1")
.setVersion(20)
.setVersionType(VersionType.EXTERNAL)
.get();
assertThat(indexResponse.getVersion(), equalTo(20L));
}
public void testRequireUnitsOnUpdateSettings() throws Exception {
createIndex("test");
ensureGreen();
HashMap<String, Object> newSettings = new HashMap<>();
newSettings.put("index.gc_deletes", "42");
try {
client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).get();
fail("did not hit expected exception");
} catch (IllegalArgumentException iae) {
// expected
assertTrue(
iae.getMessage()
.contains("failed to parse setting [index.gc_deletes] with value [42] as a time value: unit is missing or unrecognized")
);
}
}
public void testCompareAndSetInitialDelete() throws Exception {
createIndex("test");
ensureGreen();
assertFutureThrows(
client().prepareDelete("test", "1").setIfSeqNo(17).setIfPrimaryTerm(10).execute(),
VersionConflictEngineException.class
);
DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").setCreate(true).get();
assertThat(indexResponse.getVersion(), equalTo(1L));
}
public void testCompareAndSet() {
createIndex("test");
ensureGreen();
DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").get();
assertThat(indexResponse.getSeqNo(), equalTo(0L));
assertThat(indexResponse.getPrimaryTerm(), equalTo(1L));
indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_2").setIfSeqNo(0L).setIfPrimaryTerm(1).get();
assertThat(indexResponse.getSeqNo(), equalTo(1L));
assertThat(indexResponse.getPrimaryTerm(), equalTo(1L));
assertFutureThrows(
prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(10).setIfPrimaryTerm(1).execute(),
VersionConflictEngineException.class
);
assertFutureThrows(
prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(10).setIfPrimaryTerm(2).execute(),
VersionConflictEngineException.class
);
assertFutureThrows(
prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(1).setIfPrimaryTerm(2).execute(),
VersionConflictEngineException.class
);
RequestBuilder<?, ?> builder6 = client().prepareDelete("test", "1").setIfSeqNo(10).setIfPrimaryTerm(1);
expectThrows(VersionConflictEngineException.class, builder6);
RequestBuilder<?, ?> builder5 = client().prepareDelete("test", "1").setIfSeqNo(10).setIfPrimaryTerm(2);
expectThrows(VersionConflictEngineException.class, builder5);
RequestBuilder<?, ?> builder4 = client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(2);
expectThrows(VersionConflictEngineException.class, builder4);
client().admin().indices().prepareRefresh().get();
for (int i = 0; i < 10; i++) {
final GetResponse response = client().prepareGet("test", "1").get();
assertThat(response.getSeqNo(), equalTo(1L));
assertThat(response.getPrimaryTerm(), equalTo(1L));
}
// search with versioning
for (int i = 0; i < 10; i++) {
// TODO: ADD SEQ NO!
assertResponse(
prepareSearch().setQuery(matchAllQuery()).setVersion(true),
response -> assertThat(response.getHits().getAt(0).getVersion(), equalTo(2L))
);
}
// search without versioning
for (int i = 0; i < 10; i++) {
assertResponse(
prepareSearch().setQuery(matchAllQuery()),
response -> assertThat(response.getHits().getAt(0).getVersion(), equalTo(Versions.NOT_FOUND))
);
}
DeleteResponse deleteResponse = client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(1).get();
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
assertThat(deleteResponse.getSeqNo(), equalTo(2L));
assertThat(deleteResponse.getPrimaryTerm(), equalTo(1L));
RequestBuilder<?, ?> builder3 = client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(1);
expectThrows(VersionConflictEngineException.class, builder3);
RequestBuilder<?, ?> builder2 = client().prepareDelete("test", "1").setIfSeqNo(3).setIfPrimaryTerm(12);
expectThrows(VersionConflictEngineException.class, builder2);
RequestBuilder<?, ?> builder1 = client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(2);
expectThrows(VersionConflictEngineException.class, builder1);
// the doc is deleted. Even when we hit the deleted seqNo, a conditional delete should fail.
RequestBuilder<?, ?> builder = client().prepareDelete("test", "1").setIfSeqNo(2).setIfPrimaryTerm(1);
expectThrows(VersionConflictEngineException.class, builder);
}
public void testSimpleVersioningWithFlush() throws Exception {
createIndex("test");
ensureGreen();
DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").get();
assertThat(indexResponse.getSeqNo(), equalTo(0L));
client().admin().indices().prepareFlush().get();
indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_2").setIfSeqNo(0).setIfPrimaryTerm(1).get();
assertThat(indexResponse.getSeqNo(), equalTo(1L));
client().admin().indices().prepareFlush().get();
RequestBuilder<?, ?> builder2 = prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(0).setIfPrimaryTerm(1);
expectThrows(VersionConflictEngineException.class, builder2);
RequestBuilder<?, ?> builder1 = prepareIndex("test").setId("1").setCreate(true).setSource("field1", "value1_1");
expectThrows(VersionConflictEngineException.class, builder1);
RequestBuilder<?, ?> builder = client().prepareDelete("test", "1").setIfSeqNo(0).setIfPrimaryTerm(1);
expectThrows(VersionConflictEngineException.class, builder);
for (int i = 0; i < 10; i++) {
assertThat(client().prepareGet("test", "1").get().getVersion(), equalTo(2L));
}
client().admin().indices().prepareRefresh().get();
for (int i = 0; i < 10; i++) {
assertResponse(prepareSearch().setQuery(matchAllQuery()).setVersion(true).seqNoAndPrimaryTerm(true), response -> {
assertHitCount(response, 1);
assertThat(response.getHits().getAt(0).getVersion(), equalTo(2L));
assertThat(response.getHits().getAt(0).getSeqNo(), equalTo(1L));
});
}
}
public void testVersioningWithBulk() {
createIndex("test");
ensureGreen();
BulkResponse bulkResponse = client().prepareBulk().add(prepareIndex("test").setId("1").setSource("field1", "value1_1")).get();
assertThat(bulkResponse.hasFailures(), equalTo(false));
assertThat(bulkResponse.getItems().length, equalTo(1));
IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse();
assertThat(indexResponse.getVersion(), equalTo(1L));
}
// Poached from Lucene's TestIDVersionPostingsFormat:
private
|
SimpleVersioningIT
|
java
|
google__guice
|
core/test/com/google/inject/internal/ProxyFactoryTest.java
|
{
"start": 3552,
"end": 3681
}
|
class ____ {
boolean fooCalled;
@Intercept
protected void foo() {
fooCalled = true;
}
}
public static
|
Foo
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/filter/subclass/joined2/JoinedInheritanceFilterTest.java
|
{
"start": 1369,
"end": 3110
}
|
class ____ implements SessionFactoryScopeAware {
private SessionFactoryScope scope;
@Override
public void injectSessionFactoryScope(SessionFactoryScope scope) {
this.scope = scope;
}
@ParameterizedTest
@MethodSource("transactionKind")
void test(BiConsumer<SessionFactoryScope, Consumer<? extends SharedSessionContract>> inTransaction,
TriFunction<SharedSessionContract, Class<?>, Object, Object> find) {
inTransaction.accept( scope, s -> {
//noinspection deprecation
s.createQuery( "SELECT o FROM Owner o INNER JOIN FETCH o.dog d WHERE o.id = 1" ).getResultList();
s.enableFilter( "companyFilter" ).setParameter( "companyIdParam", 2l ).validate();
//noinspection deprecation
s.createQuery( "SELECT o FROM Owner o INNER JOIN FETCH o.dog d WHERE o.id = 1" ).getResultList();
//noinspection deprecation
s.createQuery( "FROM Animal" ).getResultList();
//noinspection deprecation
s.createQuery( "FROM Dog" ).getResultList();
assertNull( find.apply( s, Owner.class, 1 ) );
assertNull( find.apply( s, Animal.class, 1 ) );
assertNull( find.apply( s, Dog.class, 1 ) );
} );
}
List<? extends Arguments> transactionKind() {
// We want to test both regular and stateless session:
BiConsumer<SessionFactoryScope, Consumer<SessionImplementor>> kind1 = SessionFactoryScope::inTransaction;
TriFunction<Session, Class<?>, Object, Object> find1 = Session::find;
BiConsumer<SessionFactoryScope, Consumer<StatelessSessionImplementor>> kind2 = SessionFactoryScope::inStatelessTransaction;
TriFunction<StatelessSession, Class<?>, Object, Object> find2 = StatelessSession::get;
return List.of(
Arguments.of( kind1, find1 ),
Arguments.of( kind2, find2 )
);
}
}
|
JoinedInheritanceFilterTest
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/support/DirtiesContextTestExecutionListenerTests.java
|
{
"start": 16615,
"end": 16914
}
|
interface ____ {
@AliasFor(annotation = DirtiesContext.class)
ClassMode classMode() default AFTER_EACH_TEST_METHOD;
@AliasFor(annotation = DirtiesContext.class)
HierarchyMode hierarchyMode() default HierarchyMode.CURRENT_LEVEL;
}
@MetaDirtyAfterEachTestMethod
static
|
MetaDirtyWithOverrides
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/bigdecimal/BigDecimalAssert_isOne_Test.java
|
{
"start": 901,
"end": 1214
}
|
class ____ extends BigDecimalAssertBaseTest {
@Override
protected BigDecimalAssert invoke_api_method() {
return assertions.isOne();
}
@Override
protected void verify_internal_effects() {
verify(bigDecimals).assertIsOne(getInfo(assertions), getActual(assertions));
}
}
|
BigDecimalAssert_isOne_Test
|
java
|
apache__dubbo
|
dubbo-common/src/test/java/org/apache/dubbo/common/convert/StringToFloatConverterTest.java
|
{
"start": 1312,
"end": 1929
}
|
class ____ {
private StringToFloatConverter converter;
@BeforeEach
public void init() {
converter = (StringToFloatConverter) getExtensionLoader(Converter.class).getExtension("string-to-float");
}
@Test
void testAccept() {
assertTrue(converter.accept(String.class, Float.class));
}
@Test
void testConvert() {
assertEquals(Float.valueOf("1.0"), converter.convert("1.0"));
assertNull(converter.convert(null));
assertThrows(NumberFormatException.class, () -> {
converter.convert("ttt");
});
}
}
|
StringToFloatConverterTest
|
java
|
quarkusio__quarkus
|
extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/templatelocator/BlankLocateValueTest.java
|
{
"start": 1226,
"end": 1413
}
|
class ____ implements TemplateLocator {
@Override
public Optional<TemplateLocation> locate(String s) {
return Optional.empty();
}
}
}
|
CustomLocator
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/encoding/EncodingTest.java
|
{
"start": 1126,
"end": 2398
}
|
class ____ {
protected static SqlSessionFactory sqlSessionFactory;
@BeforeAll
static void setUp() throws Exception {
try (Reader reader = Resources.getResourceAsReader("org/apache/ibatis/submitted/encoding/EncodingConfig.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
}
Charset charset = Resources.getCharset();
try {
// make sure that the SQL file has been saved in UTF-8!
Resources.setCharset(Charset.forName("utf-8"));
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/encoding/CreateDB.sql");
} finally {
Resources.setCharset(charset);
}
}
@Test
void encoding1() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
EncodingMapper mapper = sqlSession.getMapper(EncodingMapper.class);
String answer = mapper.select1();
assertEquals("Mara\u00f1\u00f3n", answer);
}
}
@Test
void encoding2() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
EncodingMapper mapper = sqlSession.getMapper(EncodingMapper.class);
String answer = mapper.select2();
assertEquals("Mara\u00f1\u00f3n", answer);
}
}
}
|
EncodingTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/sort/SortAndFormats.java
|
{
"start": 608,
"end": 1034
}
|
class ____ {
public final Sort sort;
public final DocValueFormat[] formats;
public SortAndFormats(Sort sort, DocValueFormat[] formats) {
if (sort.getSort().length != formats.length) {
throw new IllegalArgumentException("Number of sort field mismatch: " + sort.getSort().length + " != " + formats.length);
}
this.sort = sort;
this.formats = formats;
}
}
|
SortAndFormats
|
java
|
google__guice
|
core/src/com/google/inject/internal/BytecodeGen.java
|
{
"start": 2155,
"end": 2603
}
|
class ____ {
/** Returns true if the given object is a circular proxy. */
public static boolean isCircularProxy(Object object) {
return object instanceof Proxy
&& Proxy.getInvocationHandler(object) instanceof DelegatingInvocationHandler;
}
/** Creates a new circular proxy for the given type. */
static <T> T newCircularProxy(Class<T> type, DelegatingInvocationHandler handler) {
// Ideally we would add a marker
|
BytecodeGen
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/ingest/IngestService.java
|
{
"start": 5306,
"end": 5360
}
|
class ____ several ingest related services.
*/
public
|
for
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/analysis/CustomNormalizerTests.java
|
{
"start": 1321,
"end": 6595
}
|
class ____ extends ESTokenStreamTestCase {
private static final AnalysisPlugin MOCK_ANALYSIS_PLUGIN = new MockAnalysisPlugin();
public void testBasics() throws IOException {
Settings settings = Settings.builder()
.putList("index.analysis.normalizer.my_normalizer.filter", "lowercase")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, MOCK_ANALYSIS_PLUGIN);
assertNull(analysis.indexAnalyzers.get("my_normalizer"));
NamedAnalyzer normalizer = analysis.indexAnalyzers.getNormalizer("my_normalizer");
assertNotNull(normalizer);
assertEquals("my_normalizer", normalizer.name());
assertTokenStreamContents(normalizer.tokenStream("foo", "Cet été-là"), new String[] { "cet été-là" });
assertEquals(new BytesRef("cet été-là"), normalizer.normalize("foo", "Cet été-là"));
normalizer = analysis.indexAnalyzers.getWhitespaceNormalizer("my_normalizer");
assertNotNull(normalizer);
assertEquals("my_normalizer", normalizer.name());
assertTokenStreamContents(normalizer.tokenStream("foo", "Cet été-là"), new String[] { "cet", "été-là" });
assertEquals(new BytesRef("cet été-là"), normalizer.normalize("foo", "Cet été-là"));
}
public void testUnknownType() {
Settings settings = Settings.builder()
.put("index.analysis.normalizer.my_normalizer.type", "foobar")
.putList("index.analysis.normalizer.my_normalizer.filter", "lowercase", "asciifolding")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings)
);
assertEquals("Unknown normalizer type [foobar] for [my_normalizer]", e.getMessage());
}
public void testTokenizer() throws IOException {
Settings settings = Settings.builder()
.put("index.analysis.normalizer.my_normalizer.tokenizer", "keyword")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, MOCK_ANALYSIS_PLUGIN)
);
assertEquals("Custom normalizer [my_normalizer] cannot configure a tokenizer", e.getMessage());
}
public void testCharFilters() throws IOException {
Settings settings = Settings.builder()
.put("index.analysis.char_filter.my_mapping.type", "mock_char_filter")
.putList("index.analysis.normalizer.my_normalizer.char_filter", "my_mapping")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, MOCK_ANALYSIS_PLUGIN);
assertNull(analysis.indexAnalyzers.get("my_normalizer"));
NamedAnalyzer normalizer = analysis.indexAnalyzers.getNormalizer("my_normalizer");
assertNotNull(normalizer);
assertEquals("my_normalizer", normalizer.name());
assertTokenStreamContents(normalizer.tokenStream("foo", "abc acd"), new String[] { "zbc zcd" });
assertEquals(new BytesRef("zbc"), normalizer.normalize("foo", "abc"));
normalizer = analysis.indexAnalyzers.getWhitespaceNormalizer("my_normalizer");
assertNotNull(normalizer);
assertEquals("my_normalizer", normalizer.name());
assertTokenStreamContents(normalizer.tokenStream("foo", "abc acd"), new String[] { "zbc", "zcd" });
assertEquals(new BytesRef("zbc"), normalizer.normalize("foo", "abc"));
}
public void testIllegalFilters() throws IOException {
Settings settings = Settings.builder()
.putList("index.analysis.normalizer.my_normalizer.filter", "mock_forbidden")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, MOCK_ANALYSIS_PLUGIN)
);
assertEquals("Custom normalizer [my_normalizer] may not use filter [mock_forbidden]", e.getMessage());
}
public void testIllegalCharFilters() throws IOException {
Settings settings = Settings.builder()
.putList("index.analysis.normalizer.my_normalizer.char_filter", "mock_forbidden")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, MOCK_ANALYSIS_PLUGIN)
);
assertEquals("Custom normalizer [my_normalizer] may not use char filter [mock_forbidden]", e.getMessage());
}
private static
|
CustomNormalizerTests
|
java
|
quarkusio__quarkus
|
integration-tests/vertx-http/src/test/java/io/quarkus/it/vertx/VertxProducerResourceTest.java
|
{
"start": 499,
"end": 1641
}
|
class ____ {
@TestHTTPResource(ssl = true)
URL url;
@Test
public void testInjection() {
get("/").then().body(containsString("vert.x has been injected"));
}
@Test
public void testInjectedRouter() {
given().contentType("text/plain").body("Hello world!")
.post("/").then().body(is("Hello world!"));
}
@Test
public void testRouteRegistration() {
get("/my-path").then().body(containsString("OK"));
}
@Test
public void testManagementInterfaceMissing() {
get("/management-interface-test").then().body(containsString("OK"));
}
@Test
public void testRouteRegistrationMTLS() {
RequestSpecification spec = new RequestSpecBuilder()
.setBaseUri(String.format("%s://%s", url.getProtocol(), url.getHost()))
.setPort(url.getPort())
.setKeyStore("client-keystore-1.jks", "password")
.setTrustStore("client-truststore.jks", "password")
.build();
given().spec(spec).get("/my-path").then().body(containsString("OK"));
}
}
|
VertxProducerResourceTest
|
java
|
apache__camel
|
components/camel-thrift/src/test/java/org/apache/camel/component/thrift/generated/Calculator.java
|
{
"start": 187855,
"end": 188122
}
|
class ____ implements org.apache.thrift.scheme.SchemeFactory {
@Override
public echo_resultTupleScheme getScheme() {
return new echo_resultTupleScheme();
}
}
private static
|
echo_resultTupleSchemeFactory
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/SubcomponentValidationTest.java
|
{
"start": 12821,
"end": 13164
}
|
interface ____ {",
" Object object();",
"}");
Source moduleFile =
CompilerTests.javaSource("test.ChildModule",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"import javax.inject.Singleton;",
"",
"@Module",
"final
|
ChildComponent
|
java
|
google__guice
|
core/test/com/google/inject/NullableInjectionPointTest.java
|
{
"start": 8162,
"end": 8293
}
|
class ____ {
Foo foo;
@Inject
void setFoo(@Nullable Foo foo) {
this.foo = foo;
}
}
static
|
NullableFooMethod
|
java
|
spring-projects__spring-boot
|
module/spring-boot-webmvc/src/test/java/org/springframework/boot/webmvc/autoconfigure/WebMvcAutoConfigurationTests.java
|
{
"start": 64089,
"end": 64349
}
|
class ____ {
@Bean
ServletRegistrationBean<DispatcherServlet> additionalDispatcherServlet() {
return new ServletRegistrationBean<>(new DispatcherServlet());
}
}
@Configuration(proxyBeanMethods = false)
static
|
AdditionalDispatcherServletConfiguration
|
java
|
apache__dubbo
|
dubbo-metadata/dubbo-metadata-api/src/test/java/org/apache/dubbo/metadata/report/identifier/KeyTypeEnumTest.java
|
{
"start": 1015,
"end": 1298
}
|
class ____ {
/**
* {@link KeyTypeEnum#build(String, String...)}
*/
@Test
void testBuild() {
assertEquals("/A/B/C", KeyTypeEnum.PATH.build("/A", "/B", "C"));
assertEquals("A:B:C", KeyTypeEnum.UNIQUE_KEY.build("A", "B", "C"));
}
}
|
KeyTypeEnumTest
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/util/ClassUtils.java
|
{
"start": 17568,
"end": 17639
}
|
class ____ at all
return false;
}
}
/**
* Resolve the given
|
found
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ModuleFactoryGeneratorTest.java
|
{
"start": 60087,
"end": 60308
}
|
interface ____ {",
" String value();",
"}");
Source nonScope =
CompilerTests.javaSource(
"test.NonScope",
"package test;",
"",
"@
|
CustomScope
|
java
|
google__dagger
|
javatests/dagger/android/support/functional/UsesGeneratedModulesApplication.java
|
{
"start": 3403,
"end": 3450
}
|
interface ____ {}
@Module
static
|
ActivityScope
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/SQLSecretManagerRetriableHandler.java
|
{
"start": 1639,
"end": 1735
}
|
interface ____ {
void doCall() throws SQLException;
}
@FunctionalInterface
|
SQLCommandVoid
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java
|
{
"start": 3191,
"end": 6156
}
|
interface ____'t use it.
try {
executeLocally(action, request, listener);
} catch (TaskCancelledException | IllegalArgumentException | IllegalStateException e) {
// #executeLocally returns the task and throws TaskCancelledException if it fails to register the task because the parent
// task has been cancelled, IllegalStateException if the client was not in a state to execute the request because it was not
// yet properly initialized or IllegalArgumentException if header validation fails we forward them to listener since this API
// does not concern itself with the specifics of the task handling
listener.onFailure(e);
}
}
/**
* Execute an {@link ActionType} locally, returning that {@link Task} used to track it, and linking an {@link ActionListener}.
* Prefer this method if you don't need access to the task when listening for the response. This is the method used to
* implement the {@link Client} interface.
*
* @throws TaskCancelledException if the request's parent task has been cancelled already
*/
public <Request extends ActionRequest, Response extends ActionResponse> Task executeLocally(
ActionType<Response> action,
Request request,
ActionListener<Response> listener
) {
return taskManager.registerAndExecute(
"transport",
transportAction(action),
request,
localConnection,
ActionListener.assertOnce(listener)
);
}
/**
* The id of the local {@link DiscoveryNode}. Useful for generating task ids from tasks returned by
* {@link #executeLocally(ActionType, ActionRequest, ActionListener)}.
*/
public String getLocalNodeId() {
return localNodeId.get();
}
/**
* Get the {@link TransportAction} for an {@link ActionType}, throwing exceptions if the action isn't available.
*/
private <Request extends ActionRequest, Response extends ActionResponse> TransportAction<Request, Response> transportAction(
ActionType<Response> action
) {
if (actions == null) {
throw new IllegalStateException("NodeClient has not been initialized");
}
@SuppressWarnings("unchecked")
TransportAction<Request, Response> transportAction = (TransportAction<Request, Response>) actions.get(action);
if (transportAction == null) {
throw new IllegalStateException("failed to find action [" + action + "] to execute");
}
return transportAction;
}
@Override
public RemoteClusterClient getRemoteClusterClient(
String clusterAlias,
Executor responseExecutor,
RemoteClusterService.DisconnectedStrategy disconnectedStrategy
) {
return remoteClusterService.getRemoteClusterClient(clusterAlias, responseExecutor, disconnectedStrategy);
}
}
|
doesn
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/type/TypeBindings.java
|
{
"start": 5920,
"end": 7084
}
|
class ____ "+varLen);
}
return new TypeBindings(new String[] { vars[0].getName() },
new JavaType[] { typeArg1 }, null);
}
/**
* Alternate factory method that may be called if it is possible that type
* does or does not require type parameters; this is mostly useful for
* collection- and map-like types.
*/
public static TypeBindings createIfNeeded(Class<?> erasedType, JavaType[] types)
{
TypeVariable<?>[] vars = erasedType.getTypeParameters();
if (vars == null || vars.length == 0) {
return EMPTY;
}
if (types == null) {
types = NO_TYPES;
}
int len = vars.length;
String[] names = new String[len];
for (int i = 0; i < len; ++i) {
names[i] = vars[i].getName();
}
// Check here to give better error message
if (names.length != types.length) {
throw new IllegalArgumentException("Cannot create TypeBindings for class "+erasedType.getName()
+" with "+types.length+" type parameter"
+((types.length == 1) ? "" : "s")+":
|
expects
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_2800/Issue2903.java
|
{
"start": 3244,
"end": 3341
}
|
class ____ {
public java.time.LocalTime createTime;
}
public static
|
LoginRequestDTO3
|
java
|
elastic__elasticsearch
|
x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/InternalTopMetricsReduceTests.java
|
{
"start": 953,
"end": 5628
}
|
class ____ extends ESTestCase {
public void testAllEmpty() {
InternalTopMetrics first = buildEmpty();
InternalTopMetrics reduced = reduce(first, buildEmpty(), buildEmpty(), buildEmpty());
assertThat(reduced, sameInstance(first));
}
public void testFirstEmpty() {
InternalTopMetrics first = buildEmpty();
InternalTopMetrics second = buildFilled(1, top(SortValue.from(1), 1.0));
InternalTopMetrics reduced = reduce(first, second);
assertThat(reduced, sameInstance(second));
}
public void testManyToReduce() {
InternalTopMetrics first = buildFilled(1, top(SortValue.from(2.0), randomDouble()));
InternalTopMetrics min = buildFilled(2, top(SortValue.from(1.0), randomDouble()));
InternalTopMetrics max = buildFilled(3, top(SortValue.from(7.0), randomDouble()));
InternalTopMetrics[] metrics = new InternalTopMetrics[] { first, max, min, buildEmpty(), buildEmpty(), };
InternalTopMetrics winner = first.getSortOrder() == SortOrder.ASC ? min : max;
InternalTopMetrics reduced = reduce(metrics);
assertThat(reduced.getName(), equalTo("test"));
assertThat(reduced.getMetricNames(), equalTo(singletonList("test")));
assertThat(reduced.getSortOrder(), equalTo(first.getSortOrder()));
assertThat(reduced.getSize(), equalTo(first.getSize()));
assertThat(reduced.getTopMetrics(), equalTo(winner.getTopMetrics()));
}
public void testNonZeroSize() {
InternalTopMetrics first = buildFilled(SortOrder.DESC, 3, top(SortValue.from(2.0), 1));
InternalTopMetrics second = buildFilled(2, top(SortValue.from(3.0), 2), top(SortValue.from(1.0), 2));
InternalTopMetrics third = buildFilled(3, top(SortValue.from(8.0), 4), top(SortValue.from(7.0), 5));
InternalTopMetrics[] metrics = new InternalTopMetrics[] { first, second, third, buildEmpty(), buildEmpty(), };
InternalTopMetrics reduced = reduce(metrics);
assertThat(reduced.getName(), equalTo("test"));
assertThat(reduced.getMetricNames(), equalTo(singletonList("test")));
assertThat(reduced.getSortOrder(), equalTo(first.getSortOrder()));
assertThat(reduced.getSize(), equalTo(first.getSize()));
assertThat(
reduced.getTopMetrics(),
equalTo(List.of(third.getTopMetrics().get(0), third.getTopMetrics().get(1), second.getTopMetrics().get(0)))
);
}
public void testDifferentTypes() {
InternalTopMetrics doubleMetrics = buildFilled(1, top(SortValue.from(100.0), randomDouble()));
InternalTopMetrics longMetrics = buildFilled(1, top(SortValue.from(7), randomDouble()));
InternalTopMetrics reduced = reduce(doubleMetrics, longMetrics);
// Doubles sort first.
InternalTopMetrics winner = doubleMetrics.getSortOrder() == SortOrder.ASC ? doubleMetrics : longMetrics;
assertThat(reduced.getName(), equalTo("test"));
assertThat(reduced.getMetricNames(), equalTo(singletonList("test")));
assertThat(reduced.getSortOrder(), equalTo(doubleMetrics.getSortOrder()));
assertThat(reduced.getSize(), equalTo(doubleMetrics.getSize()));
assertThat(reduced.getTopMetrics(), equalTo(winner.getTopMetrics()));
}
private InternalTopMetrics buildEmpty() {
return InternalTopMetrics.buildEmptyAggregation("test", singletonList("test"), null);
}
private InternalTopMetrics buildFilled(int size, InternalTopMetrics.TopMetric... metrics) {
return buildFilled(randomFrom(SortOrder.values()), size, metrics);
}
private InternalTopMetrics buildFilled(SortOrder sortOrder, int size, InternalTopMetrics.TopMetric... metrics) {
return new InternalTopMetrics("test", sortOrder, singletonList("test"), size, Arrays.asList(metrics), null);
}
private InternalTopMetrics.TopMetric top(SortValue sortValue, double metricValue) {
DocValueFormat sortFormat = randomFrom(DocValueFormat.RAW, DocValueFormat.BINARY, DocValueFormat.BOOLEAN, DocValueFormat.IP);
DocValueFormat metricFormat = randomFrom(DocValueFormat.RAW, DocValueFormat.BINARY, DocValueFormat.BOOLEAN, DocValueFormat.IP);
InternalTopMetrics.MetricValue realMetricValue = new InternalTopMetrics.MetricValue(metricFormat, SortValue.from(metricValue));
return new InternalTopMetrics.TopMetric(sortFormat, sortValue, singletonList(realMetricValue));
}
private InternalTopMetrics reduce(InternalTopMetrics... results) {
return (InternalTopMetrics) InternalAggregationTestCase.reduce(Arrays.asList(results), null);
}
}
|
InternalTopMetricsReduceTests
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/OrderedProgrammaticExtensionRegistrationTests.java
|
{
"start": 11106,
"end": 11467
}
|
interface ____ {
// @Order(3)
@RegisterExtension
Extension extension1 = new BeforeEachExtension(1);
// @Order(2)
@RegisterExtension
Extension extension2 = new BeforeEachExtension(2);
@Order(1)
@RegisterExtension
Extension extension3 = new BeforeEachExtension(3);
}
static
|
DefaultOrderAndExplicitOrderClassLevelExtensionRegistrationInterface
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java
|
{
"start": 1248,
"end": 2913
}
|
class ____ {
/**
* We pre-define 4 most common used EnumCounters objects. When the nsSsCounts
* and tsCounts are set to the 4 most common used value, we just point them to
* the pre-defined const EnumCounters objects instead of constructing many
* objects with the same value. See HDFS-14547.
*/
final static EnumCounters<Quota> QUOTA_RESET =
new ConstEnumCounters<>(Quota.class, HdfsConstants.QUOTA_RESET);
final static EnumCounters<Quota> QUOTA_DEFAULT =
new ConstEnumCounters<>(Quota.class, 0);
final static EnumCounters<StorageType> STORAGE_TYPE_RESET =
new ConstEnumCounters<>(StorageType.class, HdfsConstants.QUOTA_RESET);
final static EnumCounters<StorageType> STORAGE_TYPE_DEFAULT =
new ConstEnumCounters<>(StorageType.class, 0);
/**
* Modify counter with action. If the counter is ConstEnumCounters, copy all
* the values of it to a new EnumCounters object, and modify the new obj.
*
* @param counter the EnumCounters to be modified.
* @param action the modifying action on counter.
* @return the modified counter.
*/
static <T extends Enum<T>> EnumCounters<T> modify(EnumCounters<T> counter,
Consumer<EnumCounters<T>> action) {
if (counter instanceof ConstEnumCounters) {
counter = counter.deepCopyEnumCounter();
}
action.accept(counter);
return counter;
}
// Name space and storage space counts (HDFS-7775 refactors the original disk
// space count to storage space counts)
@VisibleForTesting
EnumCounters<Quota> nsSsCounts;
// Storage type space counts
@VisibleForTesting
EnumCounters<StorageType> tsCounts;
public static
|
QuotaCounts
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
|
{
"start": 1027,
"end": 1333
}
|
class ____ {
public static final String WEBHDFS_SCHEME = "webhdfs";
public static final String SWEBHDFS_SCHEME = "swebhdfs";
public static final Text WEBHDFS_TOKEN_KIND = new Text("WEBHDFS delegation");
public static final Text SWEBHDFS_TOKEN_KIND = new Text("SWEBHDFS delegation");
|
WebHdfsConstants
|
java
|
qos-ch__slf4j
|
log4j-over-slf4j/src/main/java/org/apache/log4j/Logger.java
|
{
"start": 754,
"end": 1022
}
|
class ____ a minimal implementation of the original
* <code>org.apache.log4j.Logger</code> class (as found in log4j 1.2)
* delegating all calls to a {@link org.slf4j.Logger} instance.
*
*
* @author Ceki Gülcü
* */
@SuppressWarnings("rawtypes")
public
|
is
|
java
|
quarkusio__quarkus
|
extensions/agroal/deployment/src/test/java/io/quarkus/agroal/test/AgroalMetricsTestCase.java
|
{
"start": 721,
"end": 2875
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withConfigurationResource("application-metrics-enabled.properties");
@Inject
AgroalDataSource defaultDS;
@Inject
@DataSource("ds1")
AgroalDataSource ds1;
@Inject
@RegistryType(type = MetricRegistry.Type.VENDOR)
MetricRegistry registry;
@Test
public void testMetricsOfDefaultDS() throws SQLException {
Counter acquireCount = registry.getCounters()
.get(new MetricID("agroal.acquire.count", new Tag("datasource", "default")));
Gauge<?> maxUsed = registry.getGauges()
.get(new MetricID("agroal.max.used.count", new Tag("datasource", "default")));
Assertions.assertNotNull(acquireCount, "Agroal metrics should be registered eagerly");
Assertions.assertNotNull(maxUsed, "Agroal metrics should be registered eagerly");
try (Connection connection = defaultDS.getConnection()) {
try (Statement statement = connection.createStatement()) {
statement.execute("SELECT 1");
}
}
Assertions.assertEquals(1L, acquireCount.getCount());
Assertions.assertEquals(1L, maxUsed.getValue());
}
@Test
public void testMetricsOfDs1() throws SQLException {
Counter acquireCount = registry.getCounters().get(new MetricID("agroal.acquire.count",
new Tag("datasource", "ds1")));
Gauge<?> maxUsed = registry.getGauges().get(new MetricID("agroal.max.used.count",
new Tag("datasource", "ds1")));
Assertions.assertNotNull(acquireCount, "Agroal metrics should be registered eagerly");
Assertions.assertNotNull(maxUsed, "Agroal metrics should be registered eagerly");
try (Connection connection = ds1.getConnection()) {
try (Statement statement = connection.createStatement()) {
statement.execute("SELECT 1");
}
}
Assertions.assertEquals(1L, acquireCount.getCount());
Assertions.assertEquals(1L, maxUsed.getValue());
}
}
|
AgroalMetricsTestCase
|
java
|
apache__avro
|
lang/java/avro/src/main/java/org/apache/avro/io/BufferedBinaryEncoder.java
|
{
"start": 5743,
"end": 6158
}
|
class ____ {
protected ByteSink() {
}
/** Write data from bytes, starting at off, for len bytes **/
protected abstract void innerWrite(byte[] bytes, int off, int len) throws IOException;
protected abstract void innerWrite(ByteBuffer buff) throws IOException;
/** Flush the underlying output, if supported **/
protected abstract void innerFlush() throws IOException;
}
static
|
ByteSink
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/LargeOrTest.java
|
{
"start": 1127,
"end": 2773
}
|
class ____ extends TestCase {
public void test_largeOr() throws Exception {
StringBuilder buf = new StringBuilder();
buf.append("SELECT 1 FROM T WHERE ID = ?");
for (int i = 0; i < 10000; ++i) {
buf.append(" OR ID = ?");
}
String sql = buf.toString();
OracleStatementParser parser = new OracleStatementParser(sql, SQLParserFeature.EnableSQLBinaryOpExprGroup);
SQLSelectStatement stmt = (SQLSelectStatement) parser.parseStatementList().get(0);
SQLSelectQueryBlock select = (SQLSelectQueryBlock) stmt.getSelect().getQuery();
SQLBinaryOpExprGroup where = (SQLBinaryOpExprGroup) select.getWhere();
SQLBinaryOpExpr last = (SQLBinaryOpExpr) where.getItems().get(0);
assertEquals(SQLBinaryOperator.Equality, last.getOperator());
}
public void test_largeAnd() throws Exception {
StringBuilder buf = new StringBuilder();
buf.append("SELECT 1 FROM T WHERE ID = ?");
for (int i = 0; i < 10000; ++i) {
buf.append(" AND ID = ?");
}
String sql = buf.toString();
OracleStatementParser parser = new OracleStatementParser(sql, SQLParserFeature.EnableSQLBinaryOpExprGroup);
SQLSelectStatement stmt = (SQLSelectStatement) parser.parseStatementList().get(0);
SQLSelectQueryBlock select = (SQLSelectQueryBlock) stmt.getSelect().getQuery();
SQLBinaryOpExprGroup where = (SQLBinaryOpExprGroup) select.getWhere();
SQLBinaryOpExpr last = (SQLBinaryOpExpr) where.getItems().get(0);
assertEquals(SQLBinaryOperator.Equality, last.getOperator());
}
}
|
LargeOrTest
|
java
|
apache__camel
|
components/camel-ftp/src/main/java/org/apache/camel/component/file/remote/SftpRemoteFileJCraft.java
|
{
"start": 896,
"end": 1694
}
|
class ____ implements SftpRemoteFile<ChannelSftp.LsEntry> {
private final ChannelSftp.LsEntry entry;
public SftpRemoteFileJCraft(ChannelSftp.LsEntry entry) {
this.entry = entry;
}
@Override
public ChannelSftp.LsEntry getRemoteFile() {
return entry;
}
@Override
public String getFilename() {
return entry.getFilename();
}
@Override
public String getLongname() {
return entry.getLongname();
}
@Override
public boolean isDirectory() {
return entry.getAttrs().isDir();
}
@Override
public long getFileLength() {
return entry.getAttrs().getSize();
}
@Override
public long getLastModified() {
return entry.getAttrs().getMTime() * 1000L;
}
}
|
SftpRemoteFileJCraft
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/event/WatermarkEvent.java
|
{
"start": 3448,
"end": 4321
}
|
class ____: " + watermarkTypeTag);
}
}
public Watermark getWatermark() {
return watermark;
}
public boolean isAligned() {
return isAligned;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
WatermarkEvent that = (WatermarkEvent) o;
return isAligned == that.isAligned && Objects.equals(watermark, that.watermark);
}
@Override
public int hashCode() {
return Objects.hash(watermark, isAligned);
}
@Override
public String toString() {
return "WatermarkEvent{"
+ "watermarkElement="
+ watermark
+ ", isAligned="
+ isAligned
+ '}';
}
}
|
tag
|
java
|
apache__dubbo
|
dubbo-plugin/dubbo-rest-openapi/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/support/swagger/SwaggerUIRequestHandler.java
|
{
"start": 2107,
"end": 6128
}
|
class ____ implements OpenAPIRequestHandler {
private static final String DEFAULT_CDN = "https://unpkg.com/swagger-ui-dist@5.18.2";
private static final String INDEX_PATH = "/META-INF/resources/swagger-ui/index.html";
private final FrameworkModel frameworkModel;
private final ConfigFactory configFactory;
private OpenAPIConfig config;
public SwaggerUIRequestHandler(FrameworkModel frameworkModel) {
this.frameworkModel = frameworkModel;
configFactory = frameworkModel.getOrRegisterBean(ConfigFactory.class);
}
private OpenAPIConfig getConfig() {
if (config == null) {
config = configFactory.getGlobalConfig();
}
return config;
}
@Override
public String[] getPaths() {
return new String[] {"/swagger-ui/{*path}"};
}
@Override
public HttpResult<?> handle(String path, HttpRequest request, HttpResponse response) {
String resPath = RequestUtils.getPathVariable(request, "path");
if (StringUtils.isEmpty(resPath)) {
throw HttpResult.found(PathUtils.join(request.uri(), "index.html")).toPayload();
}
String requestPath = StringUtils.substringBeforeLast(resPath, '.');
switch (requestPath) {
case "index":
return handleIndex();
case "swagger-config":
return handleSwaggerConfig();
default:
if (WebjarHelper.ENABLED && requestPath.startsWith("assets/")) {
return WebjarHelper.getInstance().handleAssets("swagger-ui", resPath.substring(7));
}
}
throw new HttpStatusException(HttpStatus.NOT_FOUND.getCode());
}
private HttpResult<?> handleIndex() {
Map<String, String> variables = new HashMap<>(4);
OpenAPIConfig config = getConfig();
String cdn = config.getSetting("swagger-ui.cdn");
if (cdn == null) {
if (WebjarHelper.ENABLED && WebjarHelper.getInstance().hasWebjar("swagger-ui")) {
cdn = "./assets";
} else {
cdn = DEFAULT_CDN;
}
}
variables.put("swagger-ui.cdn", cdn);
Map<String, String> settings = config.getSettings();
if (settings != null) {
StringBuilder sb = new StringBuilder();
for (Map.Entry<String, String> entry : settings.entrySet()) {
String key = entry.getKey();
if (key.startsWith("swagger-ui.settings.")) {
sb.append(",\n \"")
.append(key.substring(20))
.append("\": ")
.append(entry.getValue());
}
}
if (sb.length() > 0) {
variables.put("swagger-ui.settings", sb.toString());
}
}
try {
String content = StreamUtils.toString(getClass().getResourceAsStream(INDEX_PATH));
return HttpResult.of(Helper.render(content, variables::get).getBytes(UTF_8));
} catch (IOException e) {
throw new HttpStatusException(HttpStatus.INTERNAL_SERVER_ERROR.getCode(), e);
}
}
private HttpResult<?> handleSwaggerConfig() {
OpenAPIService openAPIService = frameworkModel.getBean(OpenAPIService.class);
if (openAPIService == null) {
return HttpResult.notFound();
}
Collection<String> groups = openAPIService.getOpenAPIGroups();
List<Map<String, String>> urls = new ArrayList<>();
for (String group : groups) {
Map<String, String> url = new LinkedHashMap<>(4);
url.put("name", group);
url.put("url", "../api-docs/" + group);
urls.add(url);
}
Map<String, Object> configMap = new LinkedHashMap<>();
configMap.put("urls", urls);
return HttpResult.of(JsonUtils.toJson(configMap).getBytes(UTF_8));
}
}
|
SwaggerUIRequestHandler
|
java
|
apache__flink
|
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroOutputFormat.java
|
{
"start": 1554,
"end": 1738
}
|
class ____<E> extends FileOutputFormat<E> implements Serializable {
/** Wrapper which encapsulates the supported codec and a related serialization byte. */
public
|
AvroOutputFormat
|
java
|
apache__kafka
|
clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementsTest.java
|
{
"start": 1392,
"end": 27210
}
|
class ____ {
private final Acknowledgements acks = Acknowledgements.empty();
private final int maxRecordsWithSameAcknowledgeType = Acknowledgements.MAX_RECORDS_WITH_SAME_ACKNOWLEDGE_TYPE;
@Test
public void testEmptyBatch() {
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertTrue(ackList.isEmpty());
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertTrue(ackList2.isEmpty());
}
@Test
public void testSingleStateSingleRecord() {
acks.add(0L, AcknowledgeType.ACCEPT);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(0L, ackList.get(0).lastOffset());
assertEquals(1, ackList.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(0));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(1, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(0L, ackList2.get(0).lastOffset());
assertEquals(1, ackList2.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(0));
}
@Test
public void testSingleStateMultiRecord() {
acks.add(0L, AcknowledgeType.ACCEPT);
acks.add(1L, AcknowledgeType.ACCEPT);
acks.add(2L, AcknowledgeType.ACCEPT);
acks.add(3L, AcknowledgeType.ACCEPT);
acks.add(4L, AcknowledgeType.ACCEPT);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(4L, ackList.get(0).lastOffset());
assertNotEquals(0, ackList.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(0));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(1, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(4L, ackList2.get(0).lastOffset());
assertNotEquals(0, ackList2.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(0));
}
@Test
public void testSingleAcknowledgeTypeExceedingLimit() {
int i = 0;
for (; i < maxRecordsWithSameAcknowledgeType; i++) {
acks.add(i, AcknowledgeType.ACCEPT);
}
acks.add(i++, AcknowledgeType.ACCEPT);
acks.add(i++, AcknowledgeType.ACCEPT);
for (int j = 0; j <= maxRecordsWithSameAcknowledgeType; j++) {
acks.add(i + j, AcknowledgeType.REJECT);
}
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(2, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(maxRecordsWithSameAcknowledgeType + 1, ackList.get(0).lastOffset());
assertEquals(1, ackList.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(0));
assertEquals(maxRecordsWithSameAcknowledgeType + 2, ackList.get(1).firstOffset());
assertEquals(i + maxRecordsWithSameAcknowledgeType, ackList.get(1).lastOffset());
assertEquals(1, ackList.get(1).acknowledgeTypes().size());
assertEquals(AcknowledgeType.REJECT.id, ackList.get(1).acknowledgeTypes().get(0));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(2, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(maxRecordsWithSameAcknowledgeType + 1, ackList2.get(0).lastOffset());
assertEquals(1, ackList2.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(0));
assertEquals(maxRecordsWithSameAcknowledgeType + 2, ackList2.get(1).firstOffset());
assertEquals(i + maxRecordsWithSameAcknowledgeType, ackList2.get(1).lastOffset());
assertEquals(1, ackList2.get(1).acknowledgeTypes().size());
assertEquals(AcknowledgeType.REJECT.id, ackList2.get(1).acknowledgeTypes().get(0));
}
@Test
public void testSingleAcknowledgeTypeWithGap() {
for (int i = 0; i < maxRecordsWithSameAcknowledgeType; i++) {
acks.add(i, null);
}
acks.add(maxRecordsWithSameAcknowledgeType, null);
acks.add(maxRecordsWithSameAcknowledgeType + 1, null);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(maxRecordsWithSameAcknowledgeType + 1, ackList.get(0).lastOffset());
assertEquals(1, ackList.get(0).acknowledgeTypes().size());
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList.get(0).acknowledgeTypes().get(0));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(1, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(maxRecordsWithSameAcknowledgeType + 1, ackList2.get(0).lastOffset());
assertEquals(1, ackList2.get(0).acknowledgeTypes().size());
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList2.get(0).acknowledgeTypes().get(0));
}
@Test
public void testOptimiseBatches() {
int offset = 0;
for (; offset < maxRecordsWithSameAcknowledgeType; offset++) {
acks.add(offset, AcknowledgeType.ACCEPT);
}
acks.add(offset++, AcknowledgeType.REJECT);
acks.add(offset++, AcknowledgeType.ACCEPT);
acks.add(offset++, AcknowledgeType.RELEASE);
acks.add(offset++, null);
// Adding more than the max records
for (int j = 0; j <= maxRecordsWithSameAcknowledgeType; j++) {
acks.add(offset + j, AcknowledgeType.ACCEPT);
}
offset += maxRecordsWithSameAcknowledgeType + 1;
// Adding 2 more records of different type
acks.add(offset++, AcknowledgeType.REJECT);
acks.add(offset, AcknowledgeType.RELEASE);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(3, ackList.size());
assertEquals(0, ackList.get(0).firstOffset());
assertEquals(maxRecordsWithSameAcknowledgeType + 3, ackList.get(0).lastOffset());
assertEquals(maxRecordsWithSameAcknowledgeType + 4, ackList.get(1).firstOffset());
assertEquals(2 * maxRecordsWithSameAcknowledgeType + 4, ackList.get(1).lastOffset());
assertEquals(1, ackList.get(1).acknowledgeTypes().size());
assertEquals(offset - 1, ackList.get(2).firstOffset());
assertEquals(offset, ackList.get(2).lastOffset());
assertEquals(2, ackList.get(2).acknowledgeTypes().size());
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(3, ackList2.size());
assertEquals(0, ackList2.get(0).firstOffset());
assertEquals(maxRecordsWithSameAcknowledgeType + 3, ackList2.get(0).lastOffset());
assertEquals(maxRecordsWithSameAcknowledgeType + 4, ackList2.get(1).firstOffset());
assertEquals(2 * maxRecordsWithSameAcknowledgeType + 4, ackList2.get(1).lastOffset());
assertEquals(1, ackList2.get(1).acknowledgeTypes().size());
assertEquals(offset - 1, ackList2.get(2).firstOffset());
assertEquals(offset, ackList2.get(2).lastOffset());
assertEquals(2, ackList2.get(2).acknowledgeTypes().size());
}
@Test
public void testSingleAcknowledgeTypeWithinLimit() {
acks.add(0L, AcknowledgeType.ACCEPT);
acks.add(1L, AcknowledgeType.ACCEPT);
acks.add(2L, AcknowledgeType.ACCEPT);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(1, ackList.get(0).acknowledgeTypes().size());
}
@Test
public void testMultiStateMultiRecord() {
acks.add(0L, AcknowledgeType.ACCEPT);
acks.add(1L, AcknowledgeType.ACCEPT);
acks.add(2L, AcknowledgeType.ACCEPT);
acks.add(3L, AcknowledgeType.RELEASE);
acks.add(4L, AcknowledgeType.RELEASE);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(4L, ackList.get(0).lastOffset());
assertEquals(5, ackList.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(0));
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(1));
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(2));
assertEquals(AcknowledgeType.RELEASE.id, ackList.get(0).acknowledgeTypes().get(3));
assertEquals(AcknowledgeType.RELEASE.id, ackList.get(0).acknowledgeTypes().get(4));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(1, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(4L, ackList2.get(0).lastOffset());
assertEquals(5, ackList2.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(0));
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(1));
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(2));
assertEquals(AcknowledgeType.RELEASE.id, ackList2.get(0).acknowledgeTypes().get(3));
assertEquals(AcknowledgeType.RELEASE.id, ackList2.get(0).acknowledgeTypes().get(4));
}
@Test
public void testMultiStateSingleMultiRecord() {
acks.add(0L, AcknowledgeType.ACCEPT);
acks.add(1L, AcknowledgeType.RELEASE);
acks.add(2L, AcknowledgeType.RELEASE);
acks.add(3L, AcknowledgeType.RELEASE);
acks.add(4L, AcknowledgeType.RELEASE);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(4L, ackList.get(0).lastOffset());
assertEquals(5, ackList.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(0));
assertEquals(AcknowledgeType.RELEASE.id, ackList.get(0).acknowledgeTypes().get(1));
assertEquals(AcknowledgeType.RELEASE.id, ackList.get(0).acknowledgeTypes().get(2));
assertEquals(AcknowledgeType.RELEASE.id, ackList.get(0).acknowledgeTypes().get(3));
assertEquals(AcknowledgeType.RELEASE.id, ackList.get(0).acknowledgeTypes().get(4));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(1, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(4L, ackList2.get(0).lastOffset());
assertEquals(5, ackList2.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(0));
assertEquals(AcknowledgeType.RELEASE.id, ackList2.get(0).acknowledgeTypes().get(1));
assertEquals(AcknowledgeType.RELEASE.id, ackList2.get(0).acknowledgeTypes().get(2));
assertEquals(AcknowledgeType.RELEASE.id, ackList2.get(0).acknowledgeTypes().get(3));
assertEquals(AcknowledgeType.RELEASE.id, ackList2.get(0).acknowledgeTypes().get(4));
}
@Test
public void testMultiStateMultiSingleRecord() {
acks.add(0L, AcknowledgeType.ACCEPT);
acks.add(1L, AcknowledgeType.ACCEPT);
acks.add(2L, AcknowledgeType.ACCEPT);
acks.add(3L, AcknowledgeType.ACCEPT);
acks.add(4L, AcknowledgeType.RELEASE);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(4L, ackList.get(0).lastOffset());
assertEquals(5, ackList.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(0));
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(1));
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(2));
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(3));
assertEquals(AcknowledgeType.RELEASE.id, ackList.get(0).acknowledgeTypes().get(4));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(1, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(4L, ackList2.get(0).lastOffset());
assertEquals(5, ackList2.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(0));
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(1));
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(2));
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(3));
assertEquals(AcknowledgeType.RELEASE.id, ackList2.get(0).acknowledgeTypes().get(4));
}
@Test
public void testSingleGap() {
acks.addGap(0L);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(0L, ackList.get(0).lastOffset());
assertEquals(1, ackList.get(0).acknowledgeTypes().size());
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList.get(0).acknowledgeTypes().get(0));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(1, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(0L, ackList2.get(0).lastOffset());
assertEquals(1, ackList2.get(0).acknowledgeTypes().size());
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList2.get(0).acknowledgeTypes().get(0));
}
@Test
public void testMultiGap() {
acks.addGap(0L);
acks.addGap(1L);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(1L, ackList.get(0).lastOffset());
assertEquals(1, ackList.get(0).acknowledgeTypes().size());
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList.get(0).acknowledgeTypes().get(0));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(1, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(1L, ackList2.get(0).lastOffset());
assertEquals(1, ackList2.get(0).acknowledgeTypes().size());
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList2.get(0).acknowledgeTypes().get(0));
}
@Test
public void testSingleGapSingleState() {
acks.addGap(0L);
acks.add(1L, AcknowledgeType.ACCEPT);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(1L, ackList.get(0).lastOffset());
assertEquals(2, ackList.get(0).acknowledgeTypes().size());
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList.get(0).acknowledgeTypes().get(0));
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(1));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(1, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(1L, ackList2.get(0).lastOffset());
assertEquals(2, ackList2.get(0).acknowledgeTypes().size());
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList2.get(0).acknowledgeTypes().get(0));
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(1));
}
@Test
public void testSingleStateSingleGap() {
acks.add(0L, AcknowledgeType.ACCEPT);
acks.addGap(1L);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(1L, ackList.get(0).lastOffset());
assertEquals(2, ackList.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(0));
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList.get(0).acknowledgeTypes().get(1));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(1, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(1L, ackList2.get(0).lastOffset());
assertEquals(2, ackList2.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(0));
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList2.get(0).acknowledgeTypes().get(1));
}
@Test
public void testMultiStateMultiGap() {
acks.add(0L, AcknowledgeType.RELEASE);
acks.addGap(1L);
acks.addGap(2L);
acks.add(3L, AcknowledgeType.ACCEPT);
acks.add(4L, AcknowledgeType.ACCEPT);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(4L, ackList.get(0).lastOffset());
assertEquals(5, ackList.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.RELEASE.id, ackList.get(0).acknowledgeTypes().get(0));
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList.get(0).acknowledgeTypes().get(1));
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList.get(0).acknowledgeTypes().get(2));
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(3));
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(4));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(1, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(4L, ackList2.get(0).lastOffset());
assertEquals(5, ackList2.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.RELEASE.id, ackList2.get(0).acknowledgeTypes().get(0));
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList2.get(0).acknowledgeTypes().get(1));
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList2.get(0).acknowledgeTypes().get(2));
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(3));
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(4));
}
@Test
public void testMultiStateMultiGaps() {
acks.add(0L, AcknowledgeType.ACCEPT);
acks.add(1L, AcknowledgeType.RELEASE);
acks.addGap(2L);
acks.add(3L, AcknowledgeType.RELEASE);
acks.addGap(4L);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(1, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(4L, ackList.get(0).lastOffset());
assertEquals(5, ackList.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(0));
assertEquals(AcknowledgeType.RELEASE.id, ackList.get(0).acknowledgeTypes().get(1));
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList.get(0).acknowledgeTypes().get(2));
assertEquals(AcknowledgeType.RELEASE.id, ackList.get(0).acknowledgeTypes().get(3));
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList.get(0).acknowledgeTypes().get(4));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(1, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(4L, ackList2.get(0).lastOffset());
assertEquals(5, ackList2.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(0));
assertEquals(AcknowledgeType.RELEASE.id, ackList2.get(0).acknowledgeTypes().get(1));
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList2.get(0).acknowledgeTypes().get(2));
assertEquals(AcknowledgeType.RELEASE.id, ackList2.get(0).acknowledgeTypes().get(3));
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList2.get(0).acknowledgeTypes().get(4));
}
@Test
public void testNoncontiguousBatches() {
acks.add(0L, AcknowledgeType.ACCEPT);
acks.add(1L, AcknowledgeType.RELEASE);
acks.add(3L, AcknowledgeType.REJECT);
acks.add(4L, AcknowledgeType.REJECT);
acks.add(6L, AcknowledgeType.REJECT);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(3, ackList.size());
assertEquals(0L, ackList.get(0).firstOffset());
assertEquals(1L, ackList.get(0).lastOffset());
assertEquals(2, ackList.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList.get(0).acknowledgeTypes().get(0));
assertEquals(AcknowledgeType.RELEASE.id, ackList.get(0).acknowledgeTypes().get(1));
assertEquals(3L, ackList.get(1).firstOffset());
assertEquals(4L, ackList.get(1).lastOffset());
assertEquals(1, ackList.get(1).acknowledgeTypes().size());
assertEquals(AcknowledgeType.REJECT.id, ackList.get(1).acknowledgeTypes().get(0));
assertEquals(6L, ackList.get(2).firstOffset());
assertEquals(6L, ackList.get(2).lastOffset());
assertEquals(1, ackList.get(2).acknowledgeTypes().size());
assertEquals(AcknowledgeType.REJECT.id, ackList.get(2).acknowledgeTypes().get(0));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(3, ackList2.size());
assertEquals(0L, ackList2.get(0).firstOffset());
assertEquals(1L, ackList2.get(0).lastOffset());
assertEquals(2, ackList2.get(0).acknowledgeTypes().size());
assertEquals(AcknowledgeType.ACCEPT.id, ackList2.get(0).acknowledgeTypes().get(0));
assertEquals(AcknowledgeType.RELEASE.id, ackList2.get(0).acknowledgeTypes().get(1));
assertEquals(3L, ackList2.get(1).firstOffset());
assertEquals(4L, ackList2.get(1).lastOffset());
assertEquals(1, ackList2.get(1).acknowledgeTypes().size());
assertEquals(AcknowledgeType.REJECT.id, ackList2.get(1).acknowledgeTypes().get(0));
assertEquals(6L, ackList2.get(2).firstOffset());
assertEquals(6L, ackList2.get(2).lastOffset());
assertEquals(1, ackList2.get(2).acknowledgeTypes().size());
assertEquals(AcknowledgeType.REJECT.id, ackList2.get(2).acknowledgeTypes().get(0));
}
@Test
public void testNoncontiguousGaps() {
acks.addGap(2L);
acks.addGap(4L);
List<AcknowledgementBatch> ackList = acks.getAcknowledgementBatches();
assertEquals(2, ackList.size());
assertEquals(2L, ackList.get(0).firstOffset());
assertEquals(2L, ackList.get(0).lastOffset());
assertEquals(1, ackList.get(0).acknowledgeTypes().size());
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList.get(0).acknowledgeTypes().get(0));
assertEquals(4L, ackList.get(1).firstOffset());
assertEquals(4L, ackList.get(1).lastOffset());
assertEquals(1, ackList.get(1).acknowledgeTypes().size());
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList.get(1).acknowledgeTypes().get(0));
List<AcknowledgementBatch> ackList2 = acks.getAcknowledgementBatches();
assertEquals(2, ackList2.size());
assertEquals(2L, ackList2.get(0).firstOffset());
assertEquals(2L, ackList2.get(0).lastOffset());
assertEquals(1, ackList2.get(0).acknowledgeTypes().size());
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList2.get(0).acknowledgeTypes().get(0));
assertEquals(4L, ackList2.get(1).firstOffset());
assertEquals(4L, ackList2.get(1).lastOffset());
assertEquals(1, ackList2.get(1).acknowledgeTypes().size());
assertEquals(Acknowledgements.ACKNOWLEDGE_TYPE_GAP, ackList2.get(1).acknowledgeTypes().get(0));
}
@Test
public void testCompleteSuccess() {
acks.add(0, AcknowledgeType.RENEW);
assertFalse(acks.isCompleted());
acks.complete(null);
assertTrue(acks.isCompleted());
assertNull(acks.getAcknowledgeException());
}
@Test
public void testCompleteException() {
acks.add(0, AcknowledgeType.RENEW);
assertFalse(acks.isCompleted());
acks.complete(new KafkaException());
assertTrue(acks.isCompleted());
assertNotNull(acks.getAcknowledgeException());
}
}
|
AcknowledgementsTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java
|
{
"start": 1145,
"end": 2420
}
|
class ____ extends BaseRestHandler {
@Override
public String getName() {
return "update_desired_nodes";
}
@Override
public List<Route> routes() {
return List.of(new Route(RestRequest.Method.PUT, "_internal/desired_nodes/{history_id}/{version}"));
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
final String historyId = request.param("history_id");
final long version = request.paramAsLong("version", Long.MIN_VALUE);
boolean dryRun = request.paramAsBoolean("dry_run", false);
final UpdateDesiredNodesRequest updateDesiredNodesRequest;
try (XContentParser parser = request.contentParser()) {
updateDesiredNodesRequest = UpdateDesiredNodesRequest.fromXContent(
getMasterNodeTimeout(request),
getAckTimeout(request),
historyId,
version,
dryRun,
parser
);
}
return restChannel -> client.execute(
UpdateDesiredNodesAction.INSTANCE,
updateDesiredNodesRequest,
new RestToXContentListener<>(restChannel)
);
}
}
|
RestUpdateDesiredNodesAction
|
java
|
quarkusio__quarkus
|
extensions/oidc-common/runtime/src/main/java/io/quarkus/oidc/common/runtime/config/OidcClientCommonConfigBuilder.java
|
{
"start": 9393,
"end": 13099
}
|
class ____<T> {
private record SecretImpl(Optional<String> value, Optional<Method> method, Provider provider) implements Secret {
}
private final CredentialsBuilder<T> builder;
private Optional<String> value;
private Optional<Method> method;
private Provider provider;
public SecretBuilder() {
this.builder = null;
this.value = Optional.empty();
this.method = Optional.empty();
this.provider = new ProviderBuilder<>().build();
}
public SecretBuilder(CredentialsBuilder<T> builder) {
this.builder = Objects.requireNonNull(builder);
this.value = builder.clientSecret.value();
this.method = builder.clientSecret.method();
this.provider = builder.clientSecret.provider();
}
/**
* @param method {@link Secret#method()}
* @return this builder
*/
public SecretBuilder<T> method(Method method) {
this.method = Optional.ofNullable(method);
return this;
}
/**
* @param value {@link Secret#value()}
* @return this builder
*/
public SecretBuilder<T> value(String value) {
this.value = Optional.ofNullable(value);
return this;
}
/**
* @param provider {@link Secret#provider()} created with the {@link ProviderBuilder} or SmallRye Config
* @return this builder
*/
public SecretBuilder<T> provider(Provider provider) {
this.provider = Objects.requireNonNull(provider);
return this;
}
/**
* Adds {@link Secret#provider()}.
*
* @param key {@link Provider#key()}
* @return this builder
*/
public SecretBuilder<T> provider(String key) {
return provider().key(key).end();
}
/**
* Adds {@link Secret#provider()}.
*
* @param key {@link Provider#key()}
* @param name {@link Provider#name()}
* @return this builder
*/
public SecretBuilder<T> provider(String key, String name) {
return provider().key(key).name(name).end();
}
/**
* Adds {@link Secret#provider()}.
*
* @param key {@link Provider#key()}
* @param name {@link Provider#name()}
* @param keyringName {@link Provider#keyringName()}
* @return this builder
*/
public SecretBuilder<T> provider(String key, String name, String keyringName) {
return provider().key(key).name(name).keyringName(keyringName).end();
}
/**
* Creates {@link Secret#provider()} builder.
*
* @return ProviderBuilder
*/
public ProviderBuilder<SecretBuilder<T>> provider() {
return new ProviderBuilder<>(this::provider, provider);
}
/**
* Builds {@link Secret} client secret.
*
* @return CredentialsBuilder
*/
public CredentialsBuilder<T> end() {
Objects.requireNonNull(builder);
return builder.clientSecret(build());
}
/**
* Builds {@link Credentials#clientSecret()} and {@link OidcClientCommonConfig#credentials()}.
*
* @return T builder
*/
public T endCredentials() {
return end().end();
}
public Secret build() {
return new SecretImpl(value, method, provider);
}
}
/**
* The {@link Provider} builder.
*/
public static final
|
SecretBuilder
|
java
|
google__guava
|
android/guava/src/com/google/common/util/concurrent/Monitor.java
|
{
"start": 9753,
"end": 13139
}
|
class ____ to prevent lost signals, while trying hard to
* minimize unnecessary signals. One simple and correct algorithm is to signal some other waiter
* with a satisfied guard (if one exists) whenever any thread occupying the monitor exits the
* monitor, either by unlocking all of its held locks, or by starting to wait for a guard. This
* includes exceptional exits, so all control paths involving signalling must be protected by a
* finally block.
*
* Further optimizations of this algorithm become increasingly subtle. A wait that terminates
* without the guard being satisfied (due to timeout, but not interrupt) can then immediately exit
* the monitor without signalling. If it timed out without being signalled, it does not need to
* "pass on" the signal to another thread. If it *was* signalled, then its guard must have been
* satisfied at the time of signal, and has since been modified by some other thread to be
* non-satisfied before reacquiring the lock, and that other thread takes over the responsibility
* of signaling the next waiter.
*
* Unlike the underlying Condition, if we are not careful, an interrupt *can* cause a signal to be
* lost, because the signal may be sent to a condition whose sole waiter has just been
* interrupted.
*
* Imagine a monitor with multiple guards. A thread enters the monitor, satisfies all the guards,
* and leaves, calling signalNextWaiter. With traditional locks and conditions, all the conditions
* need to be signalled because it is not known which if any of them have waiters (and hasWaiters
* can't be used reliably because of a check-then-act race). With our Monitor guards, we only
* signal the first active guard that is satisfied. But the corresponding thread may have already
* been interrupted and is waiting to reacquire the lock while still registered in activeGuards,
* in which case the signal is a no-op, and the bigger-picture signal is lost unless interrupted
* threads take special action by participating in the signal-passing game.
*/
/*
* Timeout handling is intricate, especially given our ambitious goals:
* - Avoid underflow and overflow of timeout values when specified timeouts are close to
* Long.MIN_VALUE or Long.MAX_VALUE.
* - Favor responding to interrupts over timeouts.
* - System.nanoTime() is expensive enough that we want to call it the minimum required number of
* times, typically once before invoking a blocking method. This often requires keeping track of
* the first time in a method that nanoTime() has been invoked, for which the special value 0L
* is reserved to mean "uninitialized". If timeout is non-positive, then nanoTime need never be
* called.
* - Keep behavior of fair and non-fair instances consistent.
*/
/**
* A boolean condition for which a thread may wait. A {@code Guard} is associated with a single
* {@code Monitor}. The monitor may check the guard at arbitrary times from any thread occupying
* the monitor, so code should not be written to rely on how often a guard might or might not be
* checked.
*
* <p>If a {@code Guard} is passed into any method of a {@code Monitor} other than the one it is
* associated with, an {@link IllegalMonitorStateException} is thrown.
*
* @since 10.0
*/
public abstract static
|
is
|
java
|
apache__camel
|
components/camel-mail/src/generated/java/org/apache/camel/component/mail/MailEndpointUriFactory.java
|
{
"start": 514,
"end": 5365
}
|
class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":host:port";
private static final String[] SCHEMES = new String[]{"imap", "imaps", "pop3", "pop3s", "smtp", "smtps"};
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(71);
props.add("additionalJavaMailProperties");
props.add("alternativeBodyHeader");
props.add("attachmentsContentTransferEncodingResolver");
props.add("authenticator");
props.add("backoffErrorThreshold");
props.add("backoffIdleThreshold");
props.add("backoffMultiplier");
props.add("bcc");
props.add("binding");
props.add("bridgeErrorHandler");
props.add("cc");
props.add("closeFolder");
props.add("connectionTimeout");
props.add("contentType");
props.add("contentTypeResolver");
props.add("copyTo");
props.add("debugMode");
props.add("decodeFilename");
props.add("delay");
props.add("delete");
props.add("disconnect");
props.add("exceptionHandler");
props.add("exchangePattern");
props.add("failOnDuplicateFileAttachment");
props.add("fetchSize");
props.add("folderName");
props.add("from");
props.add("generateMissingAttachmentNames");
props.add("greedy");
props.add("handleDuplicateAttachmentNames");
props.add("handleFailedMessage");
props.add("headerFilterStrategy");
props.add("host");
props.add("idempotentRepository");
props.add("idempotentRepositoryRemoveOnCommit");
props.add("ignoreUnsupportedCharset");
props.add("ignoreUriScheme");
props.add("initialDelay");
props.add("javaMailProperties");
props.add("javaMailSender");
props.add("lazyStartProducer");
props.add("mailUidGenerator");
props.add("mapMailMessage");
props.add("maxMessagesPerPoll");
props.add("mimeDecodeHeaders");
props.add("moveTo");
props.add("password");
props.add("peek");
props.add("pollStrategy");
props.add("port");
props.add("postProcessAction");
props.add("repeatCount");
props.add("replyTo");
props.add("runLoggingLevel");
props.add("scheduledExecutorService");
props.add("scheduler");
props.add("schedulerProperties");
props.add("searchTerm");
props.add("sendEmptyMessageWhenIdle");
props.add("session");
props.add("skipFailedMessage");
props.add("sortTerm");
props.add("sslContextParameters");
props.add("startScheduler");
props.add("subject");
props.add("timeUnit");
props.add("to");
props.add("unseen");
props.add("useFixedDelay");
props.add("useInlineAttachments");
props.add("username");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
Set<String> secretProps = new HashSet<>(2);
secretProps.add("password");
secretProps.add("username");
SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
Map<String, String> prefixes = new HashMap<>(3);
prefixes.put("additionalJavaMailProperties", "mail.");
prefixes.put("schedulerProperties", "scheduler.");
prefixes.put("searchTerm", "searchTerm.");
MULTI_VALUE_PREFIXES = Collections.unmodifiableMap(prefixes);
}
@Override
public boolean isEnabled(String scheme) {
for (String s : SCHEMES) {
if (s.equals(scheme)) {
return true;
}
}
return false;
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "host", null, true, copy);
uri = buildPathParameter(syntax, uri, "port", null, false, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
|
MailEndpointUriFactory
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointSettingsSerializableTest.java
|
{
"start": 2461,
"end": 5285
}
|
class ____ {
@RegisterExtension
private static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_EXTENSION =
TestingUtils.defaultExecutorExtension();
@Test
void testDeserializationOfUserCodeWithUserClassLoader() throws Exception {
final ClassLoaderUtils.ObjectAndClassLoader<Serializable> outsideClassLoading =
ClassLoaderUtils.createSerializableObjectFromNewClassLoader();
final ClassLoader classLoader = outsideClassLoading.getClassLoader();
final Serializable outOfClassPath = outsideClassLoading.getObject();
final MasterTriggerRestoreHook.Factory[] hooks = {new TestFactory(outOfClassPath)};
final SerializedValue<MasterTriggerRestoreHook.Factory[]> serHooks =
new SerializedValue<>(hooks);
final JobCheckpointingSettings checkpointingSettings =
new JobCheckpointingSettings(
new CheckpointCoordinatorConfiguration(
1000L,
10000L,
0L,
1,
CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
true,
false,
0,
0),
new SerializedValue<StateBackend>(new CustomStateBackend(outOfClassPath)),
TernaryBoolean.UNDEFINED,
new SerializedValue<>(new CustomCheckpointStorage(outOfClassPath)),
serHooks);
final JobGraph jobGraph =
JobGraphBuilder.newStreamingJobGraphBuilder()
.setJobCheckpointingSettings(checkpointingSettings)
.build();
// to serialize/deserialize the job graph to see if the behavior is correct under
// distributed execution
final JobGraph copy = CommonTestUtils.createCopySerializable(jobGraph);
final ExecutionGraph eg =
TestingDefaultExecutionGraphBuilder.newBuilder()
.setJobGraph(copy)
.setUserClassLoader(classLoader)
.build(EXECUTOR_EXTENSION.getExecutor());
assertThat(eg.getCheckpointCoordinator().getNumberOfRegisteredMasterHooks()).isOne();
assertThat(
jobGraph.getCheckpointingSettings()
.getDefaultStateBackend()
.deserializeValue(classLoader))
.isInstanceOf(CustomStateBackend.class);
}
// ------------------------------------------------------------------------
private static final
|
CheckpointSettingsSerializableTest
|
java
|
netty__netty
|
handler/src/test/java/io/netty/handler/ssl/SslContextBuilderTest.java
|
{
"start": 1867,
"end": 22959
}
|
class ____ {
@Test
public void testClientContextFromFileJdk() throws Exception {
testClientContextFromFile(SslProvider.JDK);
}
@Test
public void testClientContextFromFileOpenssl() throws Exception {
OpenSsl.ensureAvailability();
testClientContextFromFile(SslProvider.OPENSSL);
}
@Test
public void testClientContextJdk() throws Exception {
testClientContext(SslProvider.JDK);
}
@Test
public void testClientContextOpenssl() throws Exception {
OpenSsl.ensureAvailability();
testClientContext(SslProvider.OPENSSL);
}
@Test
public void testCombinedPemFileClientContextJdk() throws Exception {
testServerContextWithCombinedCertAndKeyInPem(SslProvider.JDK);
}
@Test
public void testCombinedPemFileClientContextOpenssl() throws Exception {
OpenSsl.ensureAvailability();
testServerContextWithCombinedCertAndKeyInPem(SslProvider.OPENSSL);
}
@Test
public void testKeyStoreTypeJdk() throws Exception {
testKeyStoreType(SslProvider.JDK);
}
@Test
public void testKeyStoreTypeOpenssl() throws Exception {
OpenSsl.ensureAvailability();
testKeyStoreType(SslProvider.OPENSSL);
}
@Test
public void testServerContextFromFileJdk() throws Exception {
testServerContextFromFile(SslProvider.JDK);
}
@Test
public void testServerContextFromFileOpenssl() throws Exception {
OpenSsl.ensureAvailability();
testServerContextFromFile(SslProvider.OPENSSL);
}
@Test
public void testServerContextJdk() throws Exception {
testServerContext(SslProvider.JDK);
}
@Test
public void testServerContextOpenssl() throws Exception {
OpenSsl.ensureAvailability();
testServerContext(SslProvider.OPENSSL);
}
@Test
public void testContextFromManagersJdk() throws Exception {
testContextFromManagers(SslProvider.JDK);
}
@Test
public void testContextFromManagersOpenssl() throws Exception {
OpenSsl.ensureAvailability();
assumeTrue(OpenSsl.useKeyManagerFactory());
testContextFromManagers(SslProvider.OPENSSL);
}
@Test
public void testUnsupportedPrivateKeyFailsFastForServer() {
assumeTrue(OpenSsl.isBoringSSL() || OpenSsl.isAWSLC());
testUnsupportedPrivateKeyFailsFast(true);
}
@Test
public void testUnsupportedPrivateKeyFailsFastForClient() {
assumeTrue(OpenSsl.isBoringSSL() || OpenSsl.isAWSLC());
testUnsupportedPrivateKeyFailsFast(false);
}
private static void testUnsupportedPrivateKeyFailsFast(boolean server) {
assumeTrue(OpenSsl.isBoringSSL() || OpenSsl.isAWSLC());
String cert = "-----BEGIN CERTIFICATE-----\n" +
"MIICODCCAY2gAwIBAgIEXKTrajAKBggqhkjOPQQDBDBUMQswCQYDVQQGEwJVUzEM\n" +
"MAoGA1UECAwDTi9hMQwwCgYDVQQHDANOL2ExDDAKBgNVBAoMA04vYTEMMAoGA1UE\n" +
"CwwDTi9hMQ0wCwYDVQQDDARUZXN0MB4XDTE5MDQwMzE3MjA0MloXDTIwMDQwMjE3\n" +
"MjA0MlowVDELMAkGA1UEBhMCVVMxDDAKBgNVBAgMA04vYTEMMAoGA1UEBwwDTi9h\n" +
"MQwwCgYDVQQKDANOL2ExDDAKBgNVBAsMA04vYTENMAsGA1UEAwwEVGVzdDCBpzAQ\n" +
"BgcqhkjOPQIBBgUrgQQAJwOBkgAEBPYWoTjlS2pCMGEM2P8qZnmURWA5e7XxPfIh\n" +
"HA876sjmgjJluPgT0OkweuxI4Y/XjzcPnnEBONgzAV1X93UmXdtRiIau/zvsAeFb\n" +
"j/q+6sfj1jdnUk6QsMx22kAwplXHmdz1z5ShXQ7mDZPxDbhCPEAUXzIzOqvWIZyA\n" +
"HgFxZXmQKEhExA8nxgSIvzQ3ucMwMAoGCCqGSM49BAMEA4GYADCBlAJIAdPD6jaN\n" +
"vGxkxcsIbcHn2gSfP1F1G8iNJYrXIN91KbQm8OEp4wxqnBwX8gb/3rmSoEhIU/te\n" +
"CcHuFs0guBjfgRWtJ/eDnKB/AkgDbkqrB5wqJFBmVd/rJ5QdwUVNuGP/vDjFVlb6\n" +
"Esny6//gTL7jYubLUKHOPIMftCZ2Jn4b+5l0kAs62HD5XkZLPDTwRbf7VCE=\n" +
"-----END CERTIFICATE-----";
String key = "-----BEGIN PRIVATE KEY-----\n" +
"MIIBCQIBADAQBgcqhkjOPQIBBgUrgQQAJwSB8TCB7gIBAQRIALNClTXqQWWlYDHw\n" +
"LjNxXpLk17iPepkmablhbxmYX/8CNzoz1o2gcUidoIO2DM9hm7adI/W31EOmSiUJ\n" +
"+UsC/ZH3i2qr0wn+oAcGBSuBBAAnoYGVA4GSAAQE9hahOOVLakIwYQzY/ypmeZRF\n" +
"YDl7tfE98iEcDzvqyOaCMmW4+BPQ6TB67Ejhj9ePNw+ecQE42DMBXVf3dSZd21GI\n" +
"hq7/O+wB4VuP+r7qx+PWN2dSTpCwzHbaQDCmVceZ3PXPlKFdDuYNk/ENuEI8QBRf\n" +
"MjM6q9YhnIAeAXFleZAoSETEDyfGBIi/NDe5wzA=\n" +
"-----END PRIVATE KEY-----";
ByteArrayInputStream certStream = new ByteArrayInputStream(cert.getBytes(CharsetUtil.US_ASCII));
ByteArrayInputStream keyStream = new ByteArrayInputStream(key.getBytes(CharsetUtil.US_ASCII));
final SslContextBuilder builder;
try {
if (server) {
builder = SslContextBuilder.forServer(certStream, keyStream, null);
} else {
builder = SslContextBuilder.forClient().keyManager(certStream, keyStream, null);
}
} catch (IllegalArgumentException e) {
assumeFalse("Input stream not contain valid certificates.".equals(e.getMessage())
&& e.getCause() != null
&& "java.io.IOException: Unknown named curve: 1.3.132.0.39".equals(
e.getCause().getMessage()),
"Cannot test that SslProvider rejects certificates with curve " +
"1.3.132.0.39 because the key manager does not know the curve either.");
throw e;
}
assertThrows(SSLException.class, new Executable() {
@Override
public void execute() throws Throwable {
builder.sslProvider(SslProvider.OPENSSL).build();
}
});
}
private void testServerContextWithCombinedCertAndKeyInPem(SslProvider provider) throws SSLException {
String pem = "-----BEGIN CERTIFICATE-----\n" +
"MIIB1jCCAX0CCQDq4PSOirh7MDAJBgcqhkjOPQQBMHIxCzAJBgNVBAYTAlVTMQsw\n" +
"CQYDVQQIDAJDQTEMMAoGA1UEBwwDRm9vMQwwCgYDVQQKDANCYXIxDDAKBgNVBAsM\n" +
"A0JhejEQMA4GA1UEAwwHQmFyLmNvbTEaMBgGCSqGSIb3DQEJARYLZm9vQGJhci5j\n" +
"b20wHhcNMjIxMDAyMTYzODAyWhcNMjIxMjAxMTYzODAyWjB2MQswCQYDVQQGEwJV\n" +
"UzELMAkGA1UECAwCQ0ExDDAKBgNVBAcMA0ZvbzEMMAoGA1UECgwDQmFyMQwwCgYD\n" +
"VQQLDANiYXoxFDASBgNVBAMMC2Jhci5iYXIuYmF6MRowGAYJKoZIhvcNAQkBFgtm\n" +
"b29AYmFyLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABHiEmjPEqQbqXYMB\n" +
"nAPOv24rJf6MhTwHB0QC1suZ9q9XFUkalnqGryqf/emHs81RsXWKz4sCsbIJkmHz\n" +
"H8HYhmkwCQYHKoZIzj0EAQNIADBFAiBCgzxZ5qviemPdejt2WazSgwNJTbirzoQa\n" +
"FMv2XFTTCwIhANS3fZ8BulbYkdRWVEFwm2FGotqLfC60JA/gg/brlWSP\n" +
"-----END CERTIFICATE-----\n" +
"-----BEGIN EC PRIVATE KEY-----\n" +
"MHcCAQEEIF8RlaD0JX8u2Lryq1+AbYfDaTBPJnPSA8+N2L12YuuUoAoGCCqGSM49\n" +
"AwEHoUQDQgAEeISaM8SpBupdgwGcA86/bisl/oyFPAcHRALWy5n2r1cVSRqWeoav\n" +
"Kp/96YezzVGxdYrPiwKxsgmSYfMfwdiGaQ==\n" +
"-----END EC PRIVATE KEY-----";
ByteArrayInputStream certStream = new ByteArrayInputStream(pem.getBytes(CharsetUtil.US_ASCII));
ByteArrayInputStream keyStream = new ByteArrayInputStream(pem.getBytes(CharsetUtil.US_ASCII));
SslContext context = SslContextBuilder.forServer(certStream, keyStream, null)
.sslProvider(provider)
.clientAuth(ClientAuth.OPTIONAL)
.build();
SSLEngine engine = context.newEngine(UnpooledByteBufAllocator.DEFAULT);
assertTrue(engine.getWantClientAuth());
assertFalse(engine.getNeedClientAuth());
engine.closeInbound();
engine.closeOutbound();
ReferenceCountUtil.release(engine);
ReferenceCountUtil.release(context);
}
@Test
public void testInvalidCipherJdk() throws Exception {
OpenSsl.ensureAvailability();
assertThrows(IllegalArgumentException.class, new Executable() {
@Override
public void execute() throws Throwable {
testInvalidCipher(SslProvider.JDK);
}
});
}
@Test
public void testInvalidCipherOpenSSL() throws Exception {
OpenSsl.ensureAvailability();
try {
// This may fail or not depending on the OpenSSL version used
// See https://github.com/openssl/openssl/issues/7196
testInvalidCipher(SslProvider.OPENSSL);
if (!OpenSsl.versionString().contains("1.1.1")) {
fail();
}
} catch (SSLException expected) {
// ok
}
}
@Test
public void testServerContextWithSecureRandom() throws Exception {
testServerContextWithSecureRandom(SslProvider.JDK, new SpySecureRandom());
}
@Test
public void testClientContextWithSecureRandom() throws Exception {
testClientContextWithSecureRandom(SslProvider.JDK, new SpySecureRandom());
}
private static void testKeyStoreType(SslProvider provider) throws Exception {
SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
SslContextBuilder builder = SslContextBuilder.forServer(cert.certificate(), cert.privateKey())
.sslProvider(provider)
.keyStoreType("PKCS12");
SslContext context = builder.build();
SSLEngine engine = context.newEngine(UnpooledByteBufAllocator.DEFAULT);
engine.closeInbound();
engine.closeOutbound();
ReferenceCountUtil.release(engine);
ReferenceCountUtil.release(context);
}
private static void testInvalidCipher(SslProvider provider) throws Exception {
SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
SslContextBuilder builder = SslContextBuilder.forClient()
.sslProvider(provider)
.ciphers(Collections.singleton("SOME_INVALID_CIPHER"))
.keyManager(cert.certificate(),
cert.privateKey())
.trustManager(cert.certificate());
SslContext context = builder.build();
SSLEngine engine = context.newEngine(UnpooledByteBufAllocator.DEFAULT);
ReferenceCountUtil.release(engine);
ReferenceCountUtil.release(context);
}
private static void testClientContextFromFile(SslProvider provider) throws Exception {
SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
SslContextBuilder builder = SslContextBuilder.forClient()
.sslProvider(provider)
.keyManager(cert.certificate(),
cert.privateKey())
.trustManager(cert.certificate())
.clientAuth(ClientAuth.OPTIONAL);
SslContext context = builder.build();
SSLEngine engine = context.newEngine(UnpooledByteBufAllocator.DEFAULT);
assertFalse(engine.getWantClientAuth());
assertFalse(engine.getNeedClientAuth());
engine.closeInbound();
engine.closeOutbound();
ReferenceCountUtil.release(engine);
ReferenceCountUtil.release(context);
}
private static void testClientContext(SslProvider provider) throws Exception {
SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
SslContextBuilder builder = SslContextBuilder.forClient()
.sslProvider(provider)
.keyManager(cert.key(), cert.cert())
.trustManager(cert.cert())
.clientAuth(ClientAuth.OPTIONAL);
SslContext context = builder.build();
SSLEngine engine = context.newEngine(UnpooledByteBufAllocator.DEFAULT);
assertFalse(engine.getWantClientAuth());
assertFalse(engine.getNeedClientAuth());
engine.closeInbound();
engine.closeOutbound();
ReferenceCountUtil.release(engine);
ReferenceCountUtil.release(context);
}
private static void testServerContextFromFile(SslProvider provider) throws Exception {
SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
SslContextBuilder builder = SslContextBuilder.forServer(cert.certificate(), cert.privateKey())
.sslProvider(provider)
.trustManager(cert.certificate())
.clientAuth(ClientAuth.OPTIONAL);
SslContext context = builder.build();
SSLEngine engine = context.newEngine(UnpooledByteBufAllocator.DEFAULT);
assertTrue(engine.getWantClientAuth());
assertFalse(engine.getNeedClientAuth());
engine.closeInbound();
engine.closeOutbound();
ReferenceCountUtil.release(engine);
ReferenceCountUtil.release(context);
}
private static void testServerContext(SslProvider provider) throws Exception {
SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
SslContextBuilder builder = SslContextBuilder.forServer(cert.key(), cert.cert())
.sslProvider(provider)
.trustManager(cert.cert())
.clientAuth(ClientAuth.REQUIRE);
SslContext context = builder.build();
SSLEngine engine = context.newEngine(UnpooledByteBufAllocator.DEFAULT);
assertFalse(engine.getWantClientAuth());
assertTrue(engine.getNeedClientAuth());
engine.closeInbound();
engine.closeOutbound();
ReferenceCountUtil.release(engine);
ReferenceCountUtil.release(context);
}
private static void testServerContextWithSecureRandom(SslProvider provider,
SpySecureRandom secureRandom) throws Exception {
SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
SslContextBuilder builder = SslContextBuilder.forServer(cert.key(), cert.cert())
.sslProvider(provider)
.secureRandom(secureRandom)
.trustManager(cert.cert())
.clientAuth(ClientAuth.REQUIRE);
SslContext context = builder.build();
SSLEngine engine = context.newEngine(UnpooledByteBufAllocator.DEFAULT);
assertFalse(engine.getWantClientAuth());
assertTrue(engine.getNeedClientAuth());
assertTrue(secureRandom.getCount() > 0);
engine.closeInbound();
engine.closeOutbound();
ReferenceCountUtil.release(engine);
ReferenceCountUtil.release(context);
}
private static void testClientContextWithSecureRandom(SslProvider provider,
SpySecureRandom secureRandom) throws Exception {
SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
SslContextBuilder builder = SslContextBuilder.forClient()
.sslProvider(provider)
.secureRandom(secureRandom)
.keyManager(cert.key(), cert.cert())
.trustManager(cert.cert())
.clientAuth(ClientAuth.OPTIONAL);
SslContext context = builder.build();
SSLEngine engine = context.newEngine(UnpooledByteBufAllocator.DEFAULT);
assertFalse(engine.getWantClientAuth());
assertFalse(engine.getNeedClientAuth());
assertTrue(secureRandom.getCount() > 0);
engine.closeInbound();
engine.closeOutbound();
ReferenceCountUtil.release(engine);
ReferenceCountUtil.release(context);
}
private static void testContextFromManagers(SslProvider provider) throws Exception {
final SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
KeyManager customKeyManager = new X509ExtendedKeyManager() {
@Override
public String[] getClientAliases(String s,
Principal[] principals) {
return new String[0];
}
@Override
public String chooseClientAlias(String[] strings,
Principal[] principals,
Socket socket) {
return "cert_sent_to_server";
}
@Override
public String[] getServerAliases(String s,
Principal[] principals) {
return new String[0];
}
@Override
public String chooseServerAlias(String s,
Principal[] principals,
Socket socket) {
return null;
}
@Override
public X509Certificate[] getCertificateChain(String s) {
X509Certificate[] certificates = new X509Certificate[1];
certificates[0] = cert.cert();
return new X509Certificate[0];
}
@Override
public PrivateKey getPrivateKey(String s) {
return cert.key();
}
};
TrustManager customTrustManager = new X509ExtendedTrustManager() {
@Override
public void checkClientTrusted(
X509Certificate[] x509Certificates, String s,
Socket socket) throws CertificateException { }
@Override
public void checkServerTrusted(
X509Certificate[] x509Certificates, String s,
Socket socket) throws CertificateException { }
@Override
public void checkClientTrusted(
X509Certificate[] x509Certificates, String s,
SSLEngine sslEngine) throws CertificateException { }
@Override
public void checkServerTrusted(
X509Certificate[] x509Certificates, String s,
SSLEngine sslEngine) throws CertificateException { }
@Override
public void checkClientTrusted(
X509Certificate[] x509Certificates, String s)
throws CertificateException { }
@Override
public void checkServerTrusted(
X509Certificate[] x509Certificates, String s)
throws CertificateException { }
@Override
public X509Certificate[] getAcceptedIssuers() {
return new X509Certificate[0];
}
};
SslContextBuilder client_builder = SslContextBuilder.forClient()
.sslProvider(provider)
.keyManager(customKeyManager)
.trustManager(customTrustManager)
.clientAuth(ClientAuth.OPTIONAL);
SslContext client_context = client_builder.build();
SSLEngine client_engine = client_context.newEngine(UnpooledByteBufAllocator.DEFAULT);
assertFalse(client_engine.getWantClientAuth());
assertFalse(client_engine.getNeedClientAuth());
client_engine.closeInbound();
client_engine.closeOutbound();
ReferenceCountUtil.release(client_engine);
SslContextBuilder server_builder = SslContextBuilder.forServer(customKeyManager)
.sslProvider(provider)
.trustManager(customTrustManager)
.clientAuth(ClientAuth.REQUIRE);
SslContext server_context = server_builder.build();
SSLEngine server_engine = server_context.newEngine(UnpooledByteBufAllocator.DEFAULT);
assertFalse(server_engine.getWantClientAuth());
assertTrue(server_engine.getNeedClientAuth());
server_engine.closeInbound();
server_engine.closeOutbound();
ReferenceCountUtil.release(server_engine);
ReferenceCountUtil.release(client_context);
ReferenceCountUtil.release(server_context);
}
private static final
|
SslContextBuilderTest
|
java
|
micronaut-projects__micronaut-core
|
http/src/main/java/io/micronaut/http/filter/MethodFilter.java
|
{
"start": 25603,
"end": 29082
}
|
interface ____ {
/**
* Void method that accepts a continuation.
*/
FilterReturnHandler VOID_WITH_CONTINUATION = (filterContext, returnValue, continuation) -> ExecutionFlow.just(continuation.afterMethodContext());
/**
* Void method.
*/
FilterReturnHandler VOID = (filterContext, returnValue, continuation) -> ExecutionFlow.just(filterContext);
/**
* Request handler that returns a new request.
*/
FilterReturnHandler REQUEST = (filterContext, returnValue, continuation) -> ExecutionFlow.just(
filterContext.withRequest(
(HttpRequest<?>) Objects.requireNonNull(returnValue, "Returned request must not be null, or mark the method as @Nullable")
)
);
/**
* Request handler that returns a new request (nullable).
*/
FilterReturnHandler REQUEST_NULLABLE = (filterContext, returnValue, continuation) -> {
if (returnValue == null) {
return ExecutionFlow.just(filterContext);
}
return ExecutionFlow.just(
filterContext.withRequest((HttpRequest<?>) returnValue)
);
};
/**
* Request handler that returns a response.
*/
FilterReturnHandler FROM_REQUEST_RESPONSE = (filterContext, returnValue, continuation) -> {
// cancel request pipeline, move immediately to response handling
return ExecutionFlow.just(
filterContext
.withResponse(
(HttpResponse<?>) Objects.requireNonNull(returnValue, "Returned response must not be null, or mark the method as @Nullable")
)
);
};
/**
* Request handler that returns a response (nullable).
*/
FilterReturnHandler FROM_REQUEST_RESPONSE_NULLABLE = (filterContext, returnValue, continuation) -> {
if (returnValue == null) {
return ExecutionFlow.just(filterContext);
}
// cancel request pipeline, move immediately to response handling
return ExecutionFlow.just(
filterContext.withResponse((HttpResponse<?>) returnValue)
);
};
/**
* Response handler that returns a new response.
*/
FilterReturnHandler FROM_RESPONSE_RESPONSE = (filterContext, returnValue, continuation) -> {
// cancel request pipeline, move immediately to response handling
return ExecutionFlow.just(
filterContext
.withResponse(
(HttpResponse<?>) Objects.requireNonNull(returnValue, "Returned response must not be null, or mark the method as @Nullable")
)
);
};
/**
* Response handler that returns a new response (nullable).
*/
FilterReturnHandler FROM_RESPONSE_RESPONSE_NULLABLE = (filterContext, returnValue, continuation) -> {
if (returnValue == null) {
return ExecutionFlow.just(filterContext);
}
// cancel request pipeline, move immediately to response handling
return ExecutionFlow.just(
filterContext.withResponse((HttpResponse<?>) returnValue)
);
};
@SuppressWarnings("java:S112")
// internal
|
FilterReturnHandler
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
|
{
"start": 2219,
"end": 10794
}
|
enum ____ {
INSTANCE;
JvmMetrics impl;
synchronized JvmMetrics init(String processName, String sessionId) {
if (impl == null) {
impl = create(processName, sessionId, DefaultMetricsSystem.instance());
}
return impl;
}
synchronized void shutdown() {
DefaultMetricsSystem.instance().unregisterSource(JvmMetrics.name());
impl = null;
}
}
@VisibleForTesting
public synchronized void registerIfNeeded(){
// during tests impl might exist, but is not registered
MetricsSystem ms = DefaultMetricsSystem.instance();
if (ms.getSource("JvmMetrics") == null) {
ms.register(JvmMetrics.name(), JvmMetrics.description(), this);
}
}
static final float M = 1024*1024;
static public final float MEMORY_MAX_UNLIMITED_MB = -1;
final MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
final List<GarbageCollectorMXBean> gcBeans =
ManagementFactory.getGarbageCollectorMXBeans();
private ThreadMXBean threadMXBean;
final String processName, sessionId;
private JvmPauseMonitor pauseMonitor = null;
final ConcurrentHashMap<String, MetricsInfo[]> gcInfoCache =
new ConcurrentHashMap<String, MetricsInfo[]>();
private GcTimeMonitor gcTimeMonitor = null;
@VisibleForTesting
JvmMetrics(String processName, String sessionId, boolean useThreadMXBean) {
this.processName = processName;
this.sessionId = sessionId;
if (useThreadMXBean) {
this.threadMXBean = ManagementFactory.getThreadMXBean();
}
}
public void setPauseMonitor(final JvmPauseMonitor pauseMonitor) {
this.pauseMonitor = pauseMonitor;
}
public void setGcTimeMonitor(GcTimeMonitor gcTimeMonitor) {
Preconditions.checkNotNull(gcTimeMonitor);
this.gcTimeMonitor = gcTimeMonitor;
}
public static JvmMetrics create(String processName, String sessionId,
MetricsSystem ms) {
// Reloading conf instead of getting from outside since it's redundant in
// code level to update all the callers across lots of modules,
// this method is called at most once for components (NN/DN/RM/NM/...)
// so that the overall cost is not expensive.
boolean useThreadMXBean = new Configuration().getBoolean(
CommonConfigurationKeys.HADOOP_METRICS_JVM_USE_THREAD_MXBEAN,
CommonConfigurationKeys.HADOOP_METRICS_JVM_USE_THREAD_MXBEAN_DEFAULT);
return ms.register(JvmMetrics.name(), JvmMetrics.description(),
new JvmMetrics(processName, sessionId, useThreadMXBean));
}
public static void reattach(MetricsSystem ms, JvmMetrics jvmMetrics) {
ms.register(JvmMetrics.name(), JvmMetrics.description(), jvmMetrics);
}
public static JvmMetrics initSingleton(String processName, String sessionId) {
return Singleton.INSTANCE.init(processName, sessionId);
}
/**
* Shutdown the JvmMetrics singleton. This is not necessary if the JVM itself
* is shutdown, but may be necessary for scenarios where JvmMetrics instance
* needs to be re-created while the JVM is still around. One such scenario
* is unit-testing.
*/
public static void shutdownSingleton() {
Singleton.INSTANCE.shutdown();
}
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
MetricsRecordBuilder rb = collector.addRecord(JvmMetrics)
.setContext("jvm").tag(ProcessName, processName)
.tag(SessionId, sessionId);
getMemoryUsage(rb);
getGcUsage(rb);
if (threadMXBean != null) {
getThreadUsage(rb);
} else {
getThreadUsageFromGroup(rb);
}
}
private void getMemoryUsage(MetricsRecordBuilder rb) {
MemoryUsage memNonHeap = memoryMXBean.getNonHeapMemoryUsage();
MemoryUsage memHeap = memoryMXBean.getHeapMemoryUsage();
Runtime runtime = Runtime.getRuntime();
rb.addGauge(MemNonHeapUsedM, memNonHeap.getUsed() / M)
.addGauge(MemNonHeapCommittedM, memNonHeap.getCommitted() / M)
.addGauge(MemNonHeapMaxM, calculateMaxMemoryUsage(memNonHeap))
.addGauge(MemHeapUsedM, memHeap.getUsed() / M)
.addGauge(MemHeapCommittedM, memHeap.getCommitted() / M)
.addGauge(MemHeapMaxM, calculateMaxMemoryUsage(memHeap))
.addGauge(MemMaxM, runtime.maxMemory() / M);
}
private float calculateMaxMemoryUsage(MemoryUsage memHeap) {
long max = memHeap.getMax() ;
if (max == -1) {
return MEMORY_MAX_UNLIMITED_MB;
}
return max / M;
}
private void getGcUsage(MetricsRecordBuilder rb) {
long count = 0;
long timeMillis = 0;
for (GarbageCollectorMXBean gcBean : gcBeans) {
if (gcBean.getName() != null) {
String name = gcBean.getName();
// JDK-8265136 Skip concurrent phase
if (name.startsWith("ZGC") && name.endsWith("Cycles")) {
continue;
}
}
long c = gcBean.getCollectionCount();
long t = gcBean.getCollectionTime();
MetricsInfo[] gcInfo = getGcInfo(gcBean.getName());
rb.addCounter(gcInfo[0], c).addCounter(gcInfo[1], t);
count += c;
timeMillis += t;
}
rb.addCounter(GcCount, count)
.addCounter(GcTimeMillis, timeMillis);
if (pauseMonitor != null) {
rb.addCounter(GcNumWarnThresholdExceeded,
pauseMonitor.getNumGcWarnThresholdExceeded());
rb.addCounter(GcNumInfoThresholdExceeded,
pauseMonitor.getNumGcInfoThresholdExceeded());
rb.addCounter(GcTotalExtraSleepTime,
pauseMonitor.getTotalGcExtraSleepTime());
}
if (gcTimeMonitor != null) {
rb.addGauge(GcTimePercentage,
gcTimeMonitor.getLatestGcData().getGcTimePercentage());
}
}
private MetricsInfo[] getGcInfo(String gcName) {
MetricsInfo[] gcInfo = gcInfoCache.get(gcName);
if (gcInfo == null) {
gcInfo = new MetricsInfo[2];
gcInfo[0] = Interns.info("GcCount" + gcName, "GC Count for " + gcName);
gcInfo[1] = Interns
.info("GcTimeMillis" + gcName, "GC Time for " + gcName);
MetricsInfo[] previousGcInfo = gcInfoCache.putIfAbsent(gcName, gcInfo);
if (previousGcInfo != null) {
return previousGcInfo;
}
}
return gcInfo;
}
private void getThreadUsage(MetricsRecordBuilder rb) {
int threadsNew = 0;
int threadsRunnable = 0;
int threadsBlocked = 0;
int threadsWaiting = 0;
int threadsTimedWaiting = 0;
int threadsTerminated = 0;
long threadIds[] = threadMXBean.getAllThreadIds();
for (ThreadInfo threadInfo : threadMXBean.getThreadInfo(threadIds, 0)) {
if (threadInfo == null) continue; // race protection
switch (threadInfo.getThreadState()) {
case NEW: threadsNew++; break;
case RUNNABLE: threadsRunnable++; break;
case BLOCKED: threadsBlocked++; break;
case WAITING: threadsWaiting++; break;
case TIMED_WAITING: threadsTimedWaiting++; break;
case TERMINATED: threadsTerminated++; break;
}
}
rb.addGauge(ThreadsNew, threadsNew)
.addGauge(ThreadsRunnable, threadsRunnable)
.addGauge(ThreadsBlocked, threadsBlocked)
.addGauge(ThreadsWaiting, threadsWaiting)
.addGauge(ThreadsTimedWaiting, threadsTimedWaiting)
.addGauge(ThreadsTerminated, threadsTerminated);
}
private void getThreadUsageFromGroup(MetricsRecordBuilder rb) {
int threadsNew = 0;
int threadsRunnable = 0;
int threadsBlocked = 0;
int threadsWaiting = 0;
int threadsTimedWaiting = 0;
int threadsTerminated = 0;
ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
Thread[] threads = new Thread[threadGroup.activeCount()];
threadGroup.enumerate(threads);
for (Thread thread : threads) {
if (thread == null) {
// race protection
continue;
}
switch (thread.getState()) {
case NEW: threadsNew++; break;
case RUNNABLE: threadsRunnable++; break;
case BLOCKED: threadsBlocked++; break;
case WAITING: threadsWaiting++; break;
case TIMED_WAITING: threadsTimedWaiting++; break;
case TERMINATED: threadsTerminated++; break;
default:
}
}
rb.addGauge(ThreadsNew, threadsNew)
.addGauge(ThreadsRunnable, threadsRunnable)
.addGauge(ThreadsBlocked, threadsBlocked)
.addGauge(ThreadsWaiting, threadsWaiting)
.addGauge(ThreadsTimedWaiting, threadsTimedWaiting)
.addGauge(ThreadsTerminated, threadsTerminated);
}
}
|
Singleton
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ForOverrideCheckerTest.java
|
{
"start": 5736,
"end": 6158
}
|
class ____ extends test.ExtendMe {
@Override
protected int overrideMe() {
return super.overrideMe();
}
}
""")
.doTest();
}
@Test
public void userCanCallSuperFromOverridden_explicitQualification() {
compilationHelper
.addSourceLines(
"test/Test.java",
"package test2;",
"public
|
Test
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java
|
{
"start": 4018,
"end": 5887
}
|
class ____ extends AbstractEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Evaluator.class);
protected Evaluator(DriverContext driverContext, EvalOperator.ExpressionEvaluator field) {
super(driverContext, field);
}
@Override
protected String name() {
return "MvCount";
}
@Override
protected Block evalNullable(Block block) {
try (var builder = driverContext.blockFactory().newIntBlockBuilder(block.getPositionCount())) {
for (int p = 0; p < block.getPositionCount(); p++) {
int valueCount = block.getValueCount(p);
if (valueCount == 0) {
builder.appendNull();
continue;
}
builder.appendInt(valueCount);
}
return builder.build();
}
}
@Override
protected Block evalNotNullable(Block block) {
try (var builder = driverContext.blockFactory().newIntVectorFixedBuilder(block.getPositionCount())) {
for (int p = 0; p < block.getPositionCount(); p++) {
builder.appendInt(block.getValueCount(p));
}
return builder.build().asBlock();
}
}
@Override
protected Block evalSingleValuedNullable(Block ref) {
return evalNullable(ref);
}
@Override
protected Block evalSingleValuedNotNullable(Block ref) {
return driverContext.blockFactory().newConstantIntBlockWith(1, ref.getPositionCount());
}
@Override
public long baseRamBytesUsed() {
return BASE_RAM_BYTES_USED + field.baseRamBytesUsed();
}
}
}
|
Evaluator
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/component/StructNestedComponentAssociationErrorTest.java
|
{
"start": 4094,
"end": 4195
}
|
class ____ {
Person3 person;
}
@Embeddable
@Struct(name = "person_type")
public static
|
AuthorInfo3
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfig.java
|
{
"start": 17939,
"end": 20157
}
|
class ____
implements CleanupStrategies.CleanupStrategy {
private static final long serialVersionUID = 3109278796506988980L;
/**
* @deprecated Use {@link
* org.apache.flink.state.rocksdb.RocksDBConfigurableOptions#COMPACT_FILTER_PERIODIC_COMPACTION_TIME}
* instead.
*/
@Deprecated static final Duration DEFAULT_PERIODIC_COMPACTION_TIME = Duration.ofDays(30);
/**
* @deprecated Use {@link
* org.apache.flink.state.rocksdb.RocksDBConfigurableOptions#COMPACT_FILTER_QUERY_TIME_AFTER_NUM_ENTRIES}
* instead.
*/
@Deprecated
static final RocksdbCompactFilterCleanupStrategy
DEFAULT_ROCKSDB_COMPACT_FILTER_CLEANUP_STRATEGY =
new RocksdbCompactFilterCleanupStrategy(1000L);
/**
* Number of state entries to process by compaction filter before updating current
* timestamp.
*/
private final long queryTimeAfterNumEntries;
/**
* Periodic compaction could speed up expired state entries cleanup, especially for state
* entries rarely accessed. Files older than this value will be picked up for compaction,
* and re-written to the same level as they were before. It makes sure a file goes through
* compaction filters periodically. 0 means turning off periodic compaction.
*/
private final Duration periodicCompactionTime;
private RocksdbCompactFilterCleanupStrategy(long queryTimeAfterNumEntries) {
this(queryTimeAfterNumEntries, DEFAULT_PERIODIC_COMPACTION_TIME);
}
private RocksdbCompactFilterCleanupStrategy(
long queryTimeAfterNumEntries, Duration periodicCompactionTime) {
this.queryTimeAfterNumEntries = queryTimeAfterNumEntries;
this.periodicCompactionTime = periodicCompactionTime;
}
public long getQueryTimeAfterNumEntries() {
return queryTimeAfterNumEntries;
}
public Duration getPeriodicCompactionTime() {
return periodicCompactionTime;
}
}
}
|
RocksdbCompactFilterCleanupStrategy
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/inlineme/InlinerTest.java
|
{
"start": 19346,
"end": 19914
}
|
class ____ {
@Deprecated
@InlineMe(replacement = "this.baz()")
public Client foo() {
return baz();
}
@Deprecated
@InlineMe(replacement = "this.baz()")
public Client bar() {
return baz();
}
public Client baz() {
return this;
}
}
""")
.expectUnchanged()
.addInputLines(
"Caller.java",
"""
public final
|
Client
|
java
|
spring-projects__spring-security
|
kerberos/kerberos-core/src/main/java/org/springframework/security/kerberos/authentication/sun/SunJaasKerberosTicketValidator.java
|
{
"start": 9735,
"end": 9843
}
|
class ____ is not needed and you can have different configurations
* in one JVM.
*/
private static final
|
it
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteChannelStateChecker.java
|
{
"start": 1662,
"end": 6471
}
|
class ____ {
private static final Logger LOG = LoggerFactory.getLogger(RemoteChannelStateChecker.class);
private final ResultPartitionID resultPartitionId;
private final String taskNameWithSubtask;
public RemoteChannelStateChecker(
ResultPartitionID resultPartitionId, String taskNameWithSubtask) {
this.resultPartitionId = resultPartitionId;
this.taskNameWithSubtask = taskNameWithSubtask;
}
public boolean isProducerReadyOrAbortConsumption(ResponseHandle responseHandle) {
Either<ExecutionState, Throwable> result = responseHandle.getProducerExecutionState();
ExecutionState consumerExecutionState = responseHandle.getConsumerExecutionState();
if (!isConsumerStateValidForConsumption(consumerExecutionState)) {
LOG.debug(
"Ignore a partition producer state notification for task {}, because it's not running.",
taskNameWithSubtask);
} else if (result.isLeft() || result.right() instanceof TimeoutException) {
boolean isProducerConsumerReady = isProducerConsumerReady(responseHandle);
if (isProducerConsumerReady) {
return true;
} else {
abortConsumptionOrIgnoreCheckResult(responseHandle);
}
} else {
handleFailedCheckResult(responseHandle);
}
return false;
}
private static boolean isConsumerStateValidForConsumption(
ExecutionState consumerExecutionState) {
return consumerExecutionState == ExecutionState.RUNNING
|| consumerExecutionState == ExecutionState.INITIALIZING
|| consumerExecutionState == ExecutionState.DEPLOYING;
}
private boolean isProducerConsumerReady(ResponseHandle responseHandle) {
ExecutionState producerState = getProducerState(responseHandle);
return producerState == ExecutionState.SCHEDULED
|| producerState == ExecutionState.DEPLOYING
|| producerState == ExecutionState.INITIALIZING
|| producerState == ExecutionState.RUNNING
|| producerState == ExecutionState.FINISHED;
}
private void abortConsumptionOrIgnoreCheckResult(ResponseHandle responseHandle) {
ExecutionState producerState = getProducerState(responseHandle);
if (producerState == ExecutionState.CANCELING
|| producerState == ExecutionState.CANCELED
|| producerState == ExecutionState.FAILED) {
// The producing execution has been canceled or failed. We
// don't need to re-trigger the request since it cannot
// succeed.
if (LOG.isDebugEnabled()) {
LOG.debug(
"Cancelling task {} after the producer of partition {} with attempt ID {} has entered state {}.",
taskNameWithSubtask,
resultPartitionId.getPartitionId(),
resultPartitionId.getProducerId(),
producerState);
}
responseHandle.cancelConsumption();
} else {
// Any other execution state is unexpected. Currently, only
// state CREATED is left out of the checked states. If we
// see a producer in this state, something went wrong with
// scheduling in topological order.
final String msg =
String.format(
"Producer with attempt ID %s of partition %s in unexpected state %s.",
resultPartitionId.getProducerId(),
resultPartitionId.getPartitionId(),
producerState);
responseHandle.failConsumption(new IllegalStateException(msg));
}
}
private static ExecutionState getProducerState(ResponseHandle responseHandle) {
Either<ExecutionState, Throwable> result = responseHandle.getProducerExecutionState();
return result.isLeft() ? result.left() : ExecutionState.RUNNING;
}
private void handleFailedCheckResult(ResponseHandle responseHandle) {
Throwable throwable = responseHandle.getProducerExecutionState().right();
if (throwable instanceof PartitionProducerDisposedException) {
String msg =
String.format(
"Producer %s of partition %s disposed. Cancelling execution.",
resultPartitionId.getProducerId(), resultPartitionId.getPartitionId());
LOG.info(msg, throwable);
responseHandle.cancelConsumption();
} else {
responseHandle.failConsumption(throwable);
}
}
}
|
RemoteChannelStateChecker
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/MLPredictTestPrograms.java
|
{
"start": 1762,
"end": 16296
}
|
class ____ {
static final String[] FEATURES_SCHEMA =
new String[] {"id INT PRIMARY KEY NOT ENFORCED", "feature STRING"};
static final Row[] FEATURES_BEFORE_DATA =
new Row[] {
Row.ofKind(RowKind.INSERT, 1, "Flink"),
Row.ofKind(RowKind.INSERT, 2, "Spark"),
Row.ofKind(RowKind.INSERT, 3, "Hive")
};
static final Row[] FEATURES_AFTER_DATA =
new Row[] {
Row.ofKind(RowKind.INSERT, 4, "Mysql"), Row.ofKind(RowKind.INSERT, 5, "Postgres")
};
public static final SourceTestStep SIMPLE_FEATURES_SOURCE =
SourceTestStep.newBuilder("features")
.addSchema(FEATURES_SCHEMA)
.producedValues(FEATURES_BEFORE_DATA)
.build();
static final SourceTestStep RESTORE_FEATURES_TABLE =
SourceTestStep.newBuilder("features")
.addSchema(FEATURES_SCHEMA)
.producedBeforeRestore(FEATURES_BEFORE_DATA)
.producedAfterRestore(FEATURES_AFTER_DATA)
.build();
// -------------------------------------------------------------------------------------------
static final String[] MODEL_INPUT_SCHEMA = new String[] {"feature STRING"};
static final String[] MODEL_OUTPUT_SCHEMA = new String[] {"category STRING"};
static final Map<Row, List<Row>> MODEL_DATA =
new HashMap<>() {
{
put(
Row.ofKind(RowKind.INSERT, "Flink"),
List.of(Row.ofKind(RowKind.INSERT, "Big Data")));
put(
Row.ofKind(RowKind.INSERT, "Spark"),
List.of(Row.ofKind(RowKind.INSERT, "Big Data")));
put(
Row.ofKind(RowKind.INSERT, "Hive"),
List.of(Row.ofKind(RowKind.INSERT, "Big Data")));
put(
Row.ofKind(RowKind.INSERT, "Mysql"),
List.of(Row.ofKind(RowKind.INSERT, "Database")));
put(
Row.ofKind(RowKind.INSERT, "Postgres"),
List.of(Row.ofKind(RowKind.INSERT, "Database")));
}
};
public static final ModelTestStep SYNC_MODEL =
ModelTestStep.newBuilder("chatgpt")
.addInputSchema(MODEL_INPUT_SCHEMA)
.addOutputSchema(MODEL_OUTPUT_SCHEMA)
.data(MODEL_DATA)
.build();
public static final ModelTestStep ASYNC_MODEL =
ModelTestStep.newBuilder("chatgpt")
.addInputSchema(MODEL_INPUT_SCHEMA)
.addOutputSchema(MODEL_OUTPUT_SCHEMA)
.addOption("async", "true")
.data(MODEL_DATA)
.build();
// -------------------------------------------------------------------------------------------
static final String[] SINK_SCHEMA =
new String[] {"id INT PRIMARY KEY NOT ENFORCED", "feature STRING", "category STRING"};
static final SinkTestStep RESTORE_SINK_TABLE =
SinkTestStep.newBuilder("sink_t")
.addSchema(SINK_SCHEMA)
.consumedBeforeRestore(
"+I[1, Flink, Big Data]",
"+I[2, Spark, Big Data]",
"+I[3, Hive, Big Data]")
.consumedAfterRestore("+I[4, Mysql, Database]", "+I[5, Postgres, Database]")
.build();
public static final SinkTestStep SIMPLE_SINK =
SinkTestStep.newBuilder("sink")
.addSchema(SINK_SCHEMA)
.consumedValues(
"+I[1, Flink, Big Data]",
"+I[2, Spark, Big Data]",
"+I[3, Hive, Big Data]")
.build();
// -------------------------------------------------------------------------------------------
public static final TableTestProgram SYNC_ML_PREDICT =
TableTestProgram.of("sync-ml-predict", "ml-predict in sync mode.")
.setupTableSource(RESTORE_FEATURES_TABLE)
.setupModel(SYNC_MODEL)
.setupTableSink(RESTORE_SINK_TABLE)
.runSql(
"INSERT INTO sink_t SELECT * FROM ML_PREDICT(TABLE features, MODEL chatgpt, DESCRIPTOR(feature))")
.build();
public static final TableTestProgram ASYNC_UNORDERED_ML_PREDICT =
TableTestProgram.of("async-unordered-ml-predict", "ml-predict in async unordered mode.")
.setupTableSource(RESTORE_FEATURES_TABLE)
.setupModel(ASYNC_MODEL)
.setupTableSink(RESTORE_SINK_TABLE)
.setupConfig(
ExecutionConfigOptions.TABLE_EXEC_ASYNC_ML_PREDICT_OUTPUT_MODE,
ExecutionConfigOptions.AsyncOutputMode.ALLOW_UNORDERED)
.runSql(
"INSERT INTO sink_t SELECT * FROM ML_PREDICT(TABLE features, MODEL chatgpt, DESCRIPTOR(feature))")
.build();
public static final TableTestProgram SYNC_ML_PREDICT_WITH_RUNTIME_CONFIG =
TableTestProgram.of(
"sync-ml-predict-with-runtime-options",
"ml-predict in sync mode with runtime config.")
.setupTableSource(RESTORE_FEATURES_TABLE)
.setupModel(ASYNC_MODEL)
.setupTableSink(RESTORE_SINK_TABLE)
.runSql(
"INSERT INTO sink_t SELECT * FROM ML_PREDICT(TABLE features, MODEL chatgpt, DESCRIPTOR(feature), MAP['async', 'false'])")
.build();
public static final TableTestProgram SYNC_ML_PREDICT_TABLE_API =
TableTestProgram.of(
"sync-ml-predict-table-api", "ml-predict in sync mode using Table API.")
.setupTableSource(SIMPLE_FEATURES_SOURCE)
.setupModel(SYNC_MODEL)
.setupTableSink(SIMPLE_SINK)
.runTableApi(
env ->
env.fromCall(
"ML_PREDICT",
env.from("features").asArgument("INPUT"),
env.fromModel("chatgpt").asArgument("MODEL"),
descriptor("feature").asArgument("ARGS")),
"sink")
.build();
public static final TableTestProgram ASYNC_ML_PREDICT_TABLE_API =
TableTestProgram.of(
"async-ml-predict-table-api",
"ml-predict in async mode using Table API.")
.setupTableSource(SIMPLE_FEATURES_SOURCE)
.setupModel(ASYNC_MODEL)
.setupTableSink(SIMPLE_SINK)
.setupConfig(
ExecutionConfigOptions.TABLE_EXEC_ASYNC_ML_PREDICT_OUTPUT_MODE,
ExecutionConfigOptions.AsyncOutputMode.ALLOW_UNORDERED)
.runTableApi(
env ->
env.fromCall(
"ML_PREDICT",
env.from("features").asArgument("INPUT"),
env.fromModel("chatgpt").asArgument("MODEL"),
descriptor("feature").asArgument("ARGS"),
Expressions.lit(
Map.of("async", "true"),
DataTypes.MAP(
DataTypes.STRING(),
DataTypes.STRING())
.notNull())
.asArgument("CONFIG")),
"sink")
.build();
public static final TableTestProgram ASYNC_ML_PREDICT_TABLE_API_MAP_EXPRESSION_CONFIG =
TableTestProgram.of(
"async-ml-predict-table-api-map-expression-config",
"ml-predict in async mode using Table API and map expression.")
.setupTableSource(SIMPLE_FEATURES_SOURCE)
.setupModel(ASYNC_MODEL)
.setupTableSink(SIMPLE_SINK)
.setupConfig(
ExecutionConfigOptions.TABLE_EXEC_ASYNC_ML_PREDICT_OUTPUT_MODE,
ExecutionConfigOptions.AsyncOutputMode.ALLOW_UNORDERED)
.runTableApi(
env ->
env.fromCall(
"ML_PREDICT",
env.from("features").asArgument("INPUT"),
env.fromModel("chatgpt").asArgument("MODEL"),
descriptor("feature").asArgument("ARGS"),
Expressions.map(
"async",
"true",
"max-concurrent-operations",
"10")
.asArgument("CONFIG")),
"sink")
.build();
public static final TableTestProgram ML_PREDICT_MODEL_API =
TableTestProgram.of("ml-predict-model-api", "ml-predict using model API")
.setupTableSource(SIMPLE_FEATURES_SOURCE)
.setupModel(SYNC_MODEL)
.setupTableSink(SIMPLE_SINK)
.runTableApi(
env ->
env.fromModel("chatgpt")
.predict(
env.from("features"), ColumnList.of("feature")),
"sink")
.build();
public static final TableTestProgram ASYNC_ML_PREDICT_MODEL_API =
TableTestProgram.of("async-ml-predict-model-api", "async ml-predict using model API")
.setupTableSource(SIMPLE_FEATURES_SOURCE)
.setupModel(ASYNC_MODEL)
.setupTableSink(SIMPLE_SINK)
.setupConfig(
ExecutionConfigOptions.TABLE_EXEC_ASYNC_ML_PREDICT_OUTPUT_MODE,
ExecutionConfigOptions.AsyncOutputMode.ALLOW_UNORDERED)
.runTableApi(
env ->
env.fromModel("chatgpt")
.predict(
env.from("features"),
ColumnList.of("feature"),
Map.of(
"async",
"true",
"max-concurrent-operations",
"10")),
"sink")
.build();
public static final TableTestProgram ML_PREDICT_ANON_MODEL_API =
TableTestProgram.of(
"ml-predict-anonymous-model-api",
"ml-predict using anonymous model API")
.setupTableSource(SIMPLE_FEATURES_SOURCE)
.setupTableSink(SIMPLE_SINK)
.runTableApi(
env ->
env.from(
ModelDescriptor.forProvider("values")
.inputSchema(
Schema.newBuilder()
.column(
"feature",
"STRING")
.build())
.outputSchema(
Schema.newBuilder()
.column(
"category",
"STRING")
.build())
.option(
"data-id",
TestValuesModelFactory
.registerData(
SYNC_MODEL
.data))
.build())
.predict(
env.from("features"), ColumnList.of("feature")),
"sink")
.build();
}
|
MLPredictTestPrograms
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/parser/DateParserTest_sql.java
|
{
"start": 320,
"end": 2530
}
|
class ____ extends TestCase {
public void f_test_date_0() throws Exception {
DefaultJSONParser parser = new DefaultJSONParser("1294552193254");
java.sql.Date date = parser.parseObject(java.sql.Date.class);
Assert.assertEquals(new java.sql.Date(1294552193254L), date);
parser.close();
}
public void test_date_1() throws Exception {
int features = JSON.DEFAULT_PARSER_FEATURE;
features = Feature.config(features, Feature.AllowISO8601DateFormat, true);
DefaultJSONParser parser = new DefaultJSONParser("\"2011-01-09T13:49:53.254\"", ParserConfig.getGlobalInstance(), features);
java.sql.Date date = parser.parseObject(java.sql.Date.class);
Assert.assertEquals(new java.sql.Date(1294552193254L), date);
parser.close();
}
public void test_date_2() throws Exception {
int features = JSON.DEFAULT_PARSER_FEATURE;
DefaultJSONParser parser = new DefaultJSONParser("new Date(1294552193254)", ParserConfig.getGlobalInstance(), features);
java.sql.Date date = parser.parseObject(java.sql.Date.class);
Assert.assertEquals(new java.sql.Date(1294552193254L), date);
parser.close();
}
public void test_date_3() throws Exception {
int features = JSON.DEFAULT_PARSER_FEATURE;
features = Feature.config(features, Feature.AllowISO8601DateFormat, true);
DefaultJSONParser parser = new DefaultJSONParser("\"2011-01-09T13:49:53\"", ParserConfig.getGlobalInstance(), features);
java.sql.Date date = parser.parseObject(java.sql.Date.class);
Assert.assertEquals(new java.sql.Date(1294552193000L), date);
parser.close();
}
public void test_date_4() throws Exception {
int features = JSON.DEFAULT_PARSER_FEATURE;
features = Feature.config(features, Feature.AllowISO8601DateFormat, true);
DefaultJSONParser parser = new DefaultJSONParser("\"2011-01-09\"", ParserConfig.getGlobalInstance(), features);
java.sql.Date date = parser.parseObject(java.sql.Date.class);
Assert.assertEquals(new java.sql.Date(1294502400000L), date);
parser.close();
}
}
|
DateParserTest_sql
|
java
|
quarkusio__quarkus
|
extensions/quartz/runtime/src/main/java/io/quarkus/quartz/runtime/QuartzSchedulerImpl.java
|
{
"start": 41312,
"end": 42014
}
|
class ____");
}
return super.setAsyncTask(asyncTask);
}
@Override
public Trigger schedule() {
checkScheduled();
if (task == null && asyncTask == null) {
throw new IllegalStateException("Either sync or async task must be set");
}
scheduled = true;
SyntheticScheduled scheduled = new SyntheticScheduled(identity, cron, every, 0, TimeUnit.MINUTES, delayed,
overdueGracePeriod, concurrentExecution, skipPredicate, timeZone, implementation, executionMaxDelay);
return createJobDefinitionQuartzTrigger(this, scheduled, null);
}
}
|
instead
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringXPathFilterWithNamespaceOnImportRouteContextTest.java
|
{
"start": 1088,
"end": 1592
}
|
class ____ extends XPathFilterTest {
@Override
@BeforeEach
public void setUp() throws Exception {
matchingBody = "<person name='James' city='London' xmlns='http://example.com/person'/>";
super.setUp();
}
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this, "org/apache/camel/spring/processor/xpathChoiceWithNamespaceOnRouteContext.xml");
}
}
|
SpringXPathFilterWithNamespaceOnImportRouteContextTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/serialization/EntityManagerDeserializationTest.java
|
{
"start": 945,
"end": 2694
}
|
class ____ {
@Test
@JiraKey(value = "HHH-11555")
public void deserializedEntityManagerPersistenceExceptionManagementTest(EntityManagerFactoryScope scope) {
scope.inEntityManager(
entityManager -> {
final EntityManager deserializedSession;
try {
deserializedSession = spoofSerialization( entityManager );
}
catch (IOException ioe) {
throw new RuntimeException(ioe);
}
try {
Assertions.assertThrows(
PersistenceException.class,
() -> {
deserializedSession.getTransaction().begin();
TestEntity entity = new TestEntity();
entity.setName( "Andrea" );
deserializedSession.persist( entity );
entity.setName( null );
deserializedSession.flush();
},
"Should have thrown a PersistenceException"
);
}
finally {
if ( deserializedSession != null ) {
deserializedSession.getTransaction().rollback();
deserializedSession.close();
}
}
}
);
}
private EntityManager spoofSerialization(EntityManager entityManager) throws IOException {
try(ByteArrayOutputStream serBaOut = new ByteArrayOutputStream()) {
// Serialize the incoming out to memory
ObjectOutputStream serOut = new ObjectOutputStream( serBaOut );
serOut.writeObject( entityManager );
// Now, re-constitute the model from memory
try (ByteArrayInputStream serBaIn = new ByteArrayInputStream( serBaOut.toByteArray() ); ObjectInputStream serIn = new ObjectInputStream( serBaIn )) {
EntityManager outgoing = (EntityManager) serIn.readObject();
return outgoing;
}
}
catch (ClassNotFoundException cnfe) {
throw new IOException( "Unable to locate
|
EntityManagerDeserializationTest
|
java
|
apache__camel
|
components/camel-twitter/src/generated/java/org/apache/camel/component/twitter/directmessage/TwitterDirectMessageEndpointUriFactory.java
|
{
"start": 531,
"end": 3969
}
|
class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":user";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(43);
props.add("accessToken");
props.add("accessTokenSecret");
props.add("backoffErrorThreshold");
props.add("backoffIdleThreshold");
props.add("backoffMultiplier");
props.add("bridgeErrorHandler");
props.add("consumerKey");
props.add("consumerSecret");
props.add("count");
props.add("delay");
props.add("distanceMetric");
props.add("exceptionHandler");
props.add("exchangePattern");
props.add("extendedMode");
props.add("filterOld");
props.add("greedy");
props.add("httpProxyHost");
props.add("httpProxyPassword");
props.add("httpProxyPort");
props.add("httpProxyUser");
props.add("initialDelay");
props.add("lang");
props.add("latitude");
props.add("lazyStartProducer");
props.add("locations");
props.add("longitude");
props.add("numberOfPages");
props.add("pollStrategy");
props.add("radius");
props.add("repeatCount");
props.add("runLoggingLevel");
props.add("scheduledExecutorService");
props.add("scheduler");
props.add("schedulerProperties");
props.add("sendEmptyMessageWhenIdle");
props.add("sinceId");
props.add("sortById");
props.add("startScheduler");
props.add("timeUnit");
props.add("type");
props.add("useFixedDelay");
props.add("user");
props.add("userIds");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
Set<String> secretProps = new HashSet<>(4);
secretProps.add("accessToken");
secretProps.add("accessTokenSecret");
secretProps.add("consumerKey");
secretProps.add("consumerSecret");
SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
Map<String, String> prefixes = new HashMap<>(1);
prefixes.put("schedulerProperties", "scheduler.");
MULTI_VALUE_PREFIXES = Collections.unmodifiableMap(prefixes);
}
@Override
public boolean isEnabled(String scheme) {
return "twitter-directmessage".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "user", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
|
TwitterDirectMessageEndpointUriFactory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/sqm/tree/expression/SqmStar.java
|
{
"start": 444,
"end": 1326
}
|
class ____ extends AbstractSqmExpression<Object> {
public SqmStar(NodeBuilder builder) {
super( null, builder );
}
@Override
public SqmStar copy(SqmCopyContext context) {
final SqmStar existing = context.getCopy( this );
if ( existing != null ) {
return existing;
}
return context.registerCopy( this, new SqmStar( nodeBuilder() ) );
}
@Override
public <X> X accept(SemanticQueryWalker<X> walker) {
return walker.visitStar( this );
}
@Override
public void appendHqlString(StringBuilder hql, SqmRenderContext context) {
hql.append( "*" );
}
@Override
public boolean equals(@Nullable Object object) {
return object instanceof SqmStar;
}
@Override
public int hashCode() {
return 1;
}
@Override
public boolean isCompatible(Object object) {
return equals( object );
}
@Override
public int cacheHashCode() {
return hashCode();
}
}
|
SqmStar
|
java
|
alibaba__nacos
|
console/src/main/java/com/alibaba/nacos/console/handler/impl/noop/config/HistoryNoopHandler.java
|
{
"start": 1591,
"end": 3059
}
|
class ____ implements HistoryHandler {
private static final String MCP_NOT_ENABLED_MESSAGE = "Current functionMode is `naming`, config module is disabled.";
@Override
public ConfigHistoryDetailInfo getConfigHistoryInfo(String dataId, String group, String namespaceId, Long nid)
throws NacosException {
throw new NacosApiException(NacosException.SERVER_NOT_IMPLEMENTED, ErrorCode.API_FUNCTION_DISABLED,
MCP_NOT_ENABLED_MESSAGE);
}
@Override
public Page<ConfigHistoryBasicInfo> listConfigHistory(String dataId, String group, String namespaceId,
Integer pageNo, Integer pageSize) throws NacosException {
throw new NacosApiException(NacosException.SERVER_NOT_IMPLEMENTED, ErrorCode.API_FUNCTION_DISABLED,
MCP_NOT_ENABLED_MESSAGE);
}
@Override
public ConfigHistoryDetailInfo getPreviousConfigHistoryInfo(String dataId, String group, String namespaceId,
Long id) throws NacosException {
throw new NacosApiException(NacosException.SERVER_NOT_IMPLEMENTED, ErrorCode.API_FUNCTION_DISABLED,
MCP_NOT_ENABLED_MESSAGE);
}
@Override
public List<ConfigBasicInfo> getConfigsByTenant(String namespaceId) throws NacosException {
throw new NacosApiException(NacosException.SERVER_NOT_IMPLEMENTED, ErrorCode.API_FUNCTION_DISABLED,
MCP_NOT_ENABLED_MESSAGE);
}
}
|
HistoryNoopHandler
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/main/java/io/quarkus/resteasy/reactive/server/deployment/CustomResourceProducersGenerator.java
|
{
"start": 21599,
"end": 21767
}
|
enum ____ {
QUERY,
HEADER,
PATH,
MATRIX,
COOKIE,
OTHER
}
}
}
|
CustomProducerParameterType
|
java
|
google__auto
|
common/src/test/java/com/google/auto/common/MoreTypesTest.java
|
{
"start": 22764,
"end": 23353
}
|
class ____<T> {
private List<String> listOfString;
private List<? extends CharSequence> listOfExtendsCharSequence;
private List<? super CharSequence> listOfSuperCharSequence;
private List<T> listOfT;
private List<T[]> listOfArrayOfT;
private T t;
private T[] arrayOfT;
private List<T>[] arrayOfListOfT;
private Map<?, String> mapWildcardToString;
private Map<String, ?> mapStringToWildcard;
}
// The type of every field here is such that casting to it doesn't provoke an "unchecked" warning.
@SuppressWarnings("unused")
private static
|
Unchecked
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
|
{
"start": 45824,
"end": 65763
}
|
class ____ implements RoutingChangesObserver {
// Map of RestoreUUID to a of changes to the shards' restore statuses
private final Map<String, Map<ShardId, ShardRestoreStatus>> shardChanges = new HashMap<>();
@Override
public void shardStarted(ShardRouting initializingShard, ShardRouting startedShard) {
// mark snapshot as completed
if (initializingShard.primary()) {
RecoverySource recoverySource = initializingShard.recoverySource();
if (recoverySource.getType() == RecoverySource.Type.SNAPSHOT) {
changes(recoverySource).put(
initializingShard.shardId(),
new ShardRestoreStatus(initializingShard.currentNodeId(), RestoreInProgress.State.SUCCESS)
);
}
}
}
@Override
public void shardFailed(ShardRouting failedShard, UnassignedInfo unassignedInfo) {
if (failedShard.primary() && failedShard.initializing()) {
RecoverySource recoverySource = failedShard.recoverySource();
if (recoverySource.getType() == RecoverySource.Type.SNAPSHOT) {
// mark restore entry for this shard as failed when it's due to a file corruption. There is no need wait on retries
// to restore this shard on another node if the snapshot files are corrupt. In case where a node just left or crashed,
// however, we only want to acknowledge the restore operation once it has been successfully restored on another node.
if (unassignedInfo.failure() != null && Lucene.isCorruptionException(unassignedInfo.failure().getCause())) {
changes(recoverySource).put(
failedShard.shardId(),
new ShardRestoreStatus(
failedShard.currentNodeId(),
RestoreInProgress.State.FAILURE,
unassignedInfo.failure().getCause().getMessage()
)
);
}
}
}
}
@Override
public void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) {
// if we force an empty primary, we should also fail the restore entry
if (unassignedShard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT
&& initializedShard.recoverySource().getType() != RecoverySource.Type.SNAPSHOT) {
changes(unassignedShard.recoverySource()).put(
unassignedShard.shardId(),
new ShardRestoreStatus(
null,
RestoreInProgress.State.FAILURE,
"recovery source type changed from snapshot to " + initializedShard.recoverySource()
)
);
}
}
@Override
public void unassignedInfoUpdated(ShardRouting unassignedShard, UnassignedInfo newUnassignedInfo) {
RecoverySource recoverySource = unassignedShard.recoverySource();
if (recoverySource.getType() == RecoverySource.Type.SNAPSHOT) {
if (newUnassignedInfo.lastAllocationStatus() == UnassignedInfo.AllocationStatus.DECIDERS_NO) {
String reason = "shard could not be allocated to any of the nodes";
changes(recoverySource).put(
unassignedShard.shardId(),
new ShardRestoreStatus(unassignedShard.currentNodeId(), RestoreInProgress.State.FAILURE, reason)
);
}
}
}
/**
* Helper method that creates update entry for the given recovery source's restore uuid
* if such an entry does not exist yet.
*/
private Map<ShardId, ShardRestoreStatus> changes(RecoverySource recoverySource) {
assert recoverySource.getType() == RecoverySource.Type.SNAPSHOT;
return shardChanges.computeIfAbsent(((SnapshotRecoverySource) recoverySource).restoreUUID(), k -> new HashMap<>());
}
/**
 * Produces a new {@link RestoreInProgress} by applying the accumulated per-shard status
 * updates in {@code shardChanges} to {@code oldRestore}. Entries without updates are
 * carried over unchanged; if there are no updates at all the original instance is
 * returned as-is.
 */
public RestoreInProgress applyChanges(final RestoreInProgress oldRestore) {
    if (shardChanges.isEmpty() == false) {
        RestoreInProgress.Builder builder = new RestoreInProgress.Builder();
        for (RestoreInProgress.Entry entry : oldRestore) {
            // updates recorded for this restore entry, keyed by its restore UUID
            Map<ShardId, ShardRestoreStatus> updates = shardChanges.get(entry.uuid());
            Map<ShardId, ShardRestoreStatus> shardStates = entry.shards();
            if (updates != null && updates.isEmpty() == false) {
                Map<ShardId, ShardRestoreStatus> shardsBuilder = new HashMap<>(shardStates);
                for (Map.Entry<ShardId, ShardRestoreStatus> shard : updates.entrySet()) {
                    ShardId shardId = shard.getKey();
                    ShardRestoreStatus status = shardStates.get(shardId);
                    // never overwrite a status that has already reached a completed state
                    if (status == null || status.state().completed() == false) {
                        shardsBuilder.put(shardId, shard.getValue());
                    }
                }
                Map<ShardId, ShardRestoreStatus> shards = Map.copyOf(shardsBuilder);
                // recompute the entry-level state from the merged per-shard statuses
                RestoreInProgress.State newState = overallState(RestoreInProgress.State.STARTED, shards);
                builder.add(
                    new RestoreInProgress.Entry(entry.uuid(), entry.snapshot(), newState, entry.quiet(), entry.indices(), shards)
                );
            } else {
                builder.add(entry);
            }
        }
        return builder.build();
    } else {
        return oldRestore;
    }
}
}
/**
 * Derives the entry-level restore state from the per-shard statuses: returns
 * {@code nonCompletedState} while any shard is still in flight, {@code FAILURE} if all
 * shards completed but at least one failed, and {@code SUCCESS} otherwise.
 */
private static RestoreInProgress.State overallState(
    RestoreInProgress.State nonCompletedState,
    Map<ShardId, RestoreInProgress.ShardRestoreStatus> shards
) {
    boolean anyFailed = false;
    for (RestoreInProgress.ShardRestoreStatus status : shards.values()) {
        if (status.state().completed() == false) {
            return nonCompletedState;
        }
        anyFailed |= status.state() == RestoreInProgress.State.FAILURE;
    }
    return anyFailed ? RestoreInProgress.State.FAILURE : RestoreInProgress.State.SUCCESS;
}
/**
 * Returns {@code true} if every shard in the given status map has reached a completed
 * state (vacuously true for an empty map).
 */
public static boolean completed(Map<ShardId, RestoreInProgress.ShardRestoreStatus> shards) {
    return shards.values().stream().allMatch(status -> status.state().completed());
}
/**
 * Counts the shards in the given status map whose restore ended in {@code FAILURE}.
 */
public static int failedShards(Map<ShardId, RestoreInProgress.ShardRestoreStatus> shards) {
    return (int) shards.values()
        .stream()
        .filter(status -> status.state() == RestoreInProgress.State.FAILURE)
        .count();
}
/**
 * Applies the request's rename pattern/replacement to an index name, returning the name
 * unchanged if no rename was requested. For data stream backing and failure-store
 * indices the well-known name prefix is stripped before renaming and re-attached
 * afterwards, so the rename pattern only operates on the user-visible part of the name.
 */
private static String renameIndex(String index, RestoreSnapshotRequest request, boolean isBackingIndex, boolean isFailureStore) {
    if (request.renameReplacement() == null || request.renamePattern() == null) {
        return index;
    }
    String prefix = null;
    if (isBackingIndex && index.startsWith(DataStream.BACKING_INDEX_PREFIX)) {
        prefix = DataStream.BACKING_INDEX_PREFIX;
    }
    if (isFailureStore && index.startsWith(DataStream.FAILURE_STORE_PREFIX)) {
        prefix = DataStream.FAILURE_STORE_PREFIX;
    }
    String renamedIndex;
    if (prefix != null) {
        // strip the prefix so the pattern cannot match or mangle it
        index = index.substring(prefix.length());
    }
    renamedIndex = safeRenameIndex(index, request.renamePattern(), request.renameReplacement());
    if (prefix != null) {
        // restore the prefix on the renamed name
        renamedIndex = prefix + renamedIndex;
    }
    return renamedIndex;
}
/**
 * Builds the mapping from renamed (target) index names to their {@link IndexId} in the
 * repository. Feature (system) indices are never renamed. Throws a
 * {@link SnapshotRestoreException} if two source indices would be renamed onto the same
 * target name.
 */
private static Map<String, IndexId> renamedIndices(
    RestoreSnapshotRequest request,
    List<String> filteredIndices,
    Set<String> dataStreamBackingIndices,
    Set<String> dataStreamFailureIndices,
    Set<String> featureIndices,
    RepositoryData repositoryData
) {
    Map<String, IndexId> renamedIndices = new HashMap<>();
    for (String index : filteredIndices) {
        String renamedIndex;
        if (featureIndices.contains(index)) {
            // Don't rename system indices
            renamedIndex = index;
        } else {
            renamedIndex = renameIndex(
                index,
                request,
                dataStreamBackingIndices.contains(index),
                dataStreamFailureIndices.contains(index)
            );
        }
        // a non-null previous mapping means two sources collapsed onto one target name
        IndexId previousIndex = renamedIndices.put(renamedIndex, repositoryData.resolveIndexId(index));
        if (previousIndex != null) {
            throw new SnapshotRestoreException(
                request.repository(),
                request.snapshot(),
                "indices [" + index + "] and [" + previousIndex.getName() + "] are renamed into the same index [" + renamedIndex + "]"
            );
        }
    }
    return Collections.unmodifiableMap(renamedIndices);
}
/**
 * Checks that the given snapshot can be restored onto this cluster, throwing a
 * {@link SnapshotRestoreException} if it cannot.
 *
 * @param request                 the restore request being validated
 * @param repository              metadata of the repository holding the snapshot
 * @param snapshotInfo            metadata of the snapshot to restore
 * @param preRestoreVersionChecks additional version-based checks to run before restoring
 */
static void validateSnapshotRestorable(
    RestoreSnapshotRequest request,
    RepositoryMetadata repository,
    SnapshotInfo snapshotInfo,
    List<BiConsumer<Snapshot, IndexVersion>> preRestoreVersionChecks
) {
    // Build the snapshot identifier once; every failure path and check below reuses it.
    final Snapshot snapshot = new Snapshot(snapshotInfo.projectId(), repository.name(), snapshotInfo.snapshotId());
    if (snapshotInfo.state().restorable() == false) {
        throw new SnapshotRestoreException(snapshot, "unsupported snapshot state [" + snapshotInfo.state() + "]");
    }
    // A snapshot written by a newer index version cannot be read by this node.
    if (IndexVersion.current().before(snapshotInfo.version())) {
        throw new SnapshotRestoreException(
            snapshot,
            "the snapshot was created with version ["
                + snapshotInfo.version().toReleaseVersion()
                + "] which is higher than the version of this node ["
                + IndexVersion.current().toReleaseVersion()
                + "]"
        );
    }
    preRestoreVersionChecks.forEach(c -> c.accept(snapshot, snapshotInfo.version()));
    if (request.includeGlobalState() && snapshotInfo.includeGlobalState() == Boolean.FALSE) {
        throw new SnapshotRestoreException(
            snapshot,
            "cannot restore global state since the snapshot was created without global state"
        );
    }
}
/**
 * Returns {@code true} if the snapshot records at least one shard failure for the given
 * index name.
 */
public static boolean failed(SnapshotInfo snapshot, String index) {
    return snapshot.shardFailures().stream().anyMatch(failure -> index.equals(failure.index()));
}
/**
 * Returns the indices that are currently being restored and that are contained in the indices-to-check set.
 */
public static Set<Index> restoringIndices(final ProjectState currentState, final Set<Index> indicesToCheck) {
    final Set<Index> restoring = new HashSet<>();
    for (RestoreInProgress.Entry entry : RestoreInProgress.get(currentState.cluster())) {
        for (Map.Entry<ShardId, RestoreInProgress.ShardRestoreStatus> shard : entry.shards().entrySet()) {
            final Index index = shard.getKey().getIndex();
            if (indicesToCheck.contains(index) == false) {
                continue;
            }
            if (shard.getValue().state().completed()) {
                continue; // this shard's restore is already done
            }
            // only report indices that still exist in the project metadata
            if (currentState.metadata().index(index) != null) {
                restoring.add(index);
            }
        }
    }
    return restoring;
}
/**
 * Looks up the in-progress restore entry with the given restore UUID in the cluster state.
 */
public static RestoreInProgress.Entry restoreInProgress(ClusterState state, String restoreUUID) {
    return RestoreInProgress.get(state).get(restoreUUID);
}
/**
* Set to true if {@link #removeCompletedRestoresFromClusterState()} already has an in-flight state update running that will clean up
* all completed restores from the cluster state.
*/
private volatile boolean cleanupInProgress = false;
// run a cluster state update that removes all completed restores from the cluster state
private void removeCompletedRestoresFromClusterState() {
    // URGENT priority: completed entries should be purged promptly so they do not linger
    // in the cluster state
    submitUnbatchedTask("clean up snapshot restore status", new ClusterStateUpdateTask(Priority.URGENT) {
        @Override
        public ClusterState execute(ClusterState currentState) {
            RestoreInProgress.Builder restoreInProgressBuilder = new RestoreInProgress.Builder();
            boolean changed = false;
            for (RestoreInProgress.Entry entry : RestoreInProgress.get(currentState)) {
                if (entry.state().completed()) {
                    // log completion; quiet restores only log at DEBUG
                    logger.log(
                        entry.quiet() ? Level.DEBUG : Level.INFO,
                        "completed restore of snapshot [{}] with state [{}]",
                        entry.snapshot(),
                        entry.state()
                    );
                    changed = true;
                } else {
                    restoreInProgressBuilder.add(entry);
                }
            }
            // avoid publishing a no-op cluster state update when nothing was removed
            return changed == false
                ? currentState
                : ClusterState.builder(currentState).putCustom(RestoreInProgress.TYPE, restoreInProgressBuilder.build()).build();
        }
        @Override
        public void onFailure(final Exception e) {
            // clear the flag so a later cluster state change can retry the cleanup
            cleanupInProgress = false;
            logger.log(
                MasterService.isPublishFailureException(e) ? Level.DEBUG : Level.WARN,
                "failed to remove completed restores from cluster state",
                e
            );
        }
        @Override
        public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
            cleanupInProgress = false;
        }
    });
}
/**
 * On each cluster state change on the elected master, checks whether any restore entry
 * has completed and, if so, submits a single cleanup task to remove all completed
 * entries from the cluster state. {@code cleanupInProgress} ensures at most one cleanup
 * task is in flight at a time.
 */
@Override
public void applyClusterState(ClusterChangedEvent event) {
    try {
        if (event.localNodeMaster() && cleanupInProgress == false) {
            for (RestoreInProgress.Entry entry : RestoreInProgress.get(event.state())) {
                if (entry.state().completed()) {
                    assert completed(entry.shards()) : "state says completed but restore entries are not";
                    removeCompletedRestoresFromClusterState();
                    cleanupInProgress = true;
                    // the above method cleans up all completed restores, no need to keep looping
                    break;
                }
            }
        }
    } catch (Exception t) {
        // applier callbacks must never throw; log and trip assertions in tests instead
        assert false : t;
        logger.warn("Failed to update restore state ", t);
    }
}
/**
 * Optionally updates index settings in indexMetadata by removing settings listed in ignoreSettings and
 * merging them with settings in changeSettings.
 *
 * <p>Rejects attempts to disable soft deletes, to change the delete-snapshot-on-deletion
 * flag of a searchable snapshot index, or to remove/modify settings in the
 * UNREMOVABLE/UNMODIFIABLE sets, throwing {@link SnapshotRestoreException} in each case.
 */
private static IndexMetadata updateIndexSettings(
    Snapshot snapshot,
    IndexMetadata indexMetadata,
    Settings changeSettings,
    String[] ignoreSettings
) {
    final Settings settings = indexMetadata.getSettings();
    // allow callers to pass settings without the "index." prefix
    Settings normalizedChangeSettings = Settings.builder()
        .put(changeSettings)
        .normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX)
        .build();
    // soft deletes cannot be turned off once enabled
    if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)
        && IndexSettings.INDEX_SOFT_DELETES_SETTING.exists(changeSettings)
        && IndexSettings.INDEX_SOFT_DELETES_SETTING.get(changeSettings) == false) {
        throw new SnapshotRestoreException(
            snapshot,
            "cannot disable setting [" + IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey() + "] on restore"
        );
    }
    // searchable snapshot indices: the delete-snapshot-on-index-deletion flag must not change
    if ("snapshot".equals(INDEX_STORE_TYPE_SETTING.get(settings))) {
        final Boolean changed = changeSettings.getAsBoolean(SEARCHABLE_SNAPSHOTS_DELETE_SNAPSHOT_ON_INDEX_DELETION, null);
        if (changed != null) {
            final Boolean previous = settings.getAsBoolean(SEARCHABLE_SNAPSHOTS_DELETE_SNAPSHOT_ON_INDEX_DELETION, null);
            if (Objects.equals(previous, changed) == false) {
                throw new SnapshotRestoreException(
                    snapshot,
                    format(
                        "cannot change value of [%s] when restoring searchable snapshot [%s:%s] as index %s",
                        SEARCHABLE_SNAPSHOTS_DELETE_SNAPSHOT_ON_INDEX_DELETION,
                        projectRepoString(snapshot.getProjectId(), snapshot.getRepository()),
                        snapshot.getSnapshotId().getName(),
                        indexMetadata.getIndex()
                    )
                );
            }
        }
    }
    IndexMetadata.Builder builder = IndexMetadata.builder(indexMetadata);
    // split the ignore list into exact keys and wildcard patterns, validating exact keys up front
    Set<String> keyFilters = new HashSet<>();
    List<String> simpleMatchPatterns = new ArrayList<>();
    for (String ignoredSetting : ignoreSettings) {
        if (Regex.isSimpleMatchPattern(ignoredSetting) == false) {
            if (UNREMOVABLE_SETTINGS.contains(ignoredSetting)) {
                throw new SnapshotRestoreException(snapshot, "cannot remove setting [" + ignoredSetting + "] on restore");
            } else {
                keyFilters.add(ignoredSetting);
            }
        } else {
            simpleMatchPatterns.add(ignoredSetting);
        }
    }
    // keep existing settings that are not ignored (unremovable ones always survive),
    // then overlay the requested changes (rejecting unmodifiable keys)
    Settings.Builder settingsBuilder = Settings.builder().put(settings.filter(k -> {
        if (UNREMOVABLE_SETTINGS.contains(k) == false) {
            for (String filterKey : keyFilters) {
                if (k.equals(filterKey)) {
                    return false;
                }
            }
            for (String pattern : simpleMatchPatterns) {
                if (Regex.simpleMatch(pattern, k)) {
                    return false;
                }
            }
        }
        return true;
    })).put(normalizedChangeSettings.filter(k -> {
        if (UNMODIFIABLE_SETTINGS.contains(k)) {
            throw new SnapshotRestoreException(snapshot, "cannot modify setting [" + k + "] on restore");
        } else {
            return true;
        }
    }));
    settingsBuilder.remove(MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey());
    return builder.settings(settingsBuilder).build();
}
/**
* Cluster state update task that is executed to start a restore operation.
*/
private final
|
RestoreInProgressUpdater
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfileShardResultTests.java
|
{
"start": 1375,
"end": 5442
}
|
class ____ extends AbstractXContentSerializingTestCase<AggregationProfileShardResult> {
public static AggregationProfileShardResult createTestItem(int depth) {
int size = randomIntBetween(0, 5);
List<ProfileResult> aggProfileResults = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
aggProfileResults.add(ProfileResultTests.createTestItem(1));
}
return new AggregationProfileShardResult(aggProfileResults);
}
@Override
protected AggregationProfileShardResult createTestInstance() {
return createTestItem(2);
}
@Override
protected AggregationProfileShardResult mutateInstance(AggregationProfileShardResult instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected AggregationProfileShardResult doParseInstance(XContentParser parser) throws IOException {
XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
XContentParserUtils.ensureFieldName(parser, parser.nextToken(), AggregationProfileShardResult.AGGREGATIONS);
XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser);
AggregationProfileShardResult result = SearchResponseUtils.readAggregationProfileShardResult(parser);
XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_ARRAY, parser.currentToken(), parser);
XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser);
return result;
}
@Override
protected Reader<AggregationProfileShardResult> instanceReader() {
return AggregationProfileShardResult::new;
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
return ProfileResultTests.RANDOM_FIELDS_EXCLUDE_FILTER;
}
public void testToXContent() throws IOException {
List<ProfileResult> profileResults = new ArrayList<>();
Map<String, Long> breakdown = new LinkedHashMap<>();
breakdown.put("timing1", 2000L);
breakdown.put("timing2", 4000L);
Map<String, Object> debug = new LinkedHashMap<>();
debug.put("stuff", "stuff");
debug.put("other_stuff", List.of("foo", "bar"));
ProfileResult profileResult = new ProfileResult("someType", "someDescription", breakdown, debug, 6000L, Collections.emptyList());
profileResults.add(profileResult);
AggregationProfileShardResult aggProfileResults = new AggregationProfileShardResult(profileResults);
BytesReference xContent = toXContent(aggProfileResults, XContentType.JSON, false);
assertEquals(XContentHelper.stripWhitespace("""
{
"aggregations": [
{
"type": "someType",
"description": "someDescription",
"time_in_nanos": 6000,
"breakdown": {
"timing1": 2000,
"timing2": 4000
},
"debug": {
"stuff": "stuff",
"other_stuff": [ "foo", "bar" ]
}
}
]
}"""), xContent.utf8ToString());
xContent = toXContent(aggProfileResults, XContentType.JSON, true);
assertEquals(XContentHelper.stripWhitespace("""
{
"aggregations": [
{
"type": "someType",
"description": "someDescription",
"time": "6micros",
"time_in_nanos": 6000,
"breakdown": {
"timing1": 2000,
"timing2": 4000
},
"debug": {
"stuff": "stuff",
"other_stuff": [ "foo", "bar" ]
}
}
]
}"""), xContent.utf8ToString());
}
}
|
AggregationProfileShardResultTests
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/conversion/jodatime/StringToLocalDateMapper.java
|
{
"start": 283,
"end": 483
}
|
interface ____ {
StringToLocalDateMapper INSTANCE = Mappers.getMapper( StringToLocalDateMapper.class );
TargetWithLocalDate sourceToTarget(SourceWithStringDate source);
}
|
StringToLocalDateMapper
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/event/internal/DefaultDirtyCheckEventListener.java
|
{
"start": 1306,
"end": 3859
}
|
class ____ implements DirtyCheckEventListener {
@Override
public void onDirtyCheck(DirtyCheckEvent event) throws HibernateException {
final var session = event.getSession();
final var persistenceContext = session.getPersistenceContextInternal();
final var holdersByKey = persistenceContext.getEntityHoldersByKey();
if ( holdersByKey != null ) {
for ( var holder : holdersByKey.values() ) {
if ( isEntityDirty( holder, session ) ) {
event.setDirty( true );
return;
}
}
}
final var entriesByCollection = persistenceContext.getCollectionEntries();
if ( entriesByCollection != null ) {
for ( var entry : entriesByCollection.entrySet() ) {
if ( isCollectionDirty( entry.getKey(), entry.getValue().getLoadedPersister() ) ) {
event.setDirty( true );
return;
}
}
}
}
private static boolean isEntityDirty(EntityHolder holder, EventSource session) {
final var entityEntry = holder.getEntityEntry();
if ( entityEntry == null ) {
// holders with no entity entry yet cannot contain dirty entities
return false;
}
final Status status = entityEntry.getStatus();
return switch ( status ) {
case GONE, READ_ONLY -> false;
case DELETED -> true;
case MANAGED -> isManagedEntityDirty( holder.getEntity(), entityEntry, session );
case SAVING, LOADING -> throw new AssertionFailure( "Unexpected status: " + status );
};
}
private static boolean isManagedEntityDirty(Object entity, EntityEntry entityEntry, EventSource session) {
if ( entityEntry.requiresDirtyCheck( entity ) ) { // takes into account CustomEntityDirtinessStrategy
final var persister = entityEntry.getPersister();
final var propertyValues =
entityEntry.getStatus() == Status.DELETED
? entityEntry.getDeletedState()
: persister.getValues( entity );
final var dirty =
persister.findDirty( propertyValues, entityEntry.getLoadedState(), entity, session );
return dirty != null;
}
else {
return false;
}
}
private static boolean isCollectionDirty(PersistentCollection<?> collection, CollectionPersister loadedPersister) {
return collection.isDirty()
|| collection.wasInitialized()
&& loadedPersister != null
&& loadedPersister.isMutable() //optimization
// && !loadedPersister.isInverse() // even if it's inverse, could still result in a cache update
&& ( collection.isDirectlyAccessible() || loadedPersister.getElementType().isMutable() ) //optimization
&& !collection.equalsSnapshot( loadedPersister );
}
}
|
DefaultDirtyCheckEventListener
|
java
|
google__dagger
|
javatests/dagger/functional/componentdependency/NonComponentDependencyComponent.java
|
{
"start": 933,
"end": 1202
}
|
class ____ {
@SuppressWarnings("unused")
@Inject
ThingTwo(
Thing thing,
NonComponentDependencyComponent nonComponentDependencyComponent,
NonComponentDependencyComponent.ThingComponent thingComponent) {}
}
// A non-component
|
ThingTwo
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/longarray/LongArrayAssert_startsWith_Test.java
|
{
"start": 963,
"end": 1306
}
|
class ____ extends LongArrayAssertBaseTest {
@Override
protected LongArrayAssert invoke_api_method() {
return assertions.startsWith(6L, 8L);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertStartsWith(getInfo(assertions), getActual(assertions), arrayOf(6L, 8L));
}
}
|
LongArrayAssert_startsWith_Test
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/ActionResponse.java
|
{
"start": 837,
"end": 1253
}
|
class ____ extends TransportResponse {
public ActionResponse() {}
/**
* A response with no payload. This is deliberately not an implementation of {@link ToXContent} or similar because an empty response
* has no valid {@link XContent} representation. Use {@link EmptyResponseListener} to convert this to a valid (plain-text) REST
* response instead.
*/
public static final
|
ActionResponse
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/RobotsTextPage.java
|
{
"start": 962,
"end": 1362
}
|
class ____ extends TextPage {
public static final String ROBOTS_TXT = "robots.txt";
public static final String ROBOTS_TXT_PATH = "/" + ROBOTS_TXT;
static final String USER_AGENT_LINE = "User-agent: *";
static final String DISALLOW_LINE = "Disallow: /";
@Override
public void render() {
putWithoutEscapeHtml(USER_AGENT_LINE);
putWithoutEscapeHtml(DISALLOW_LINE);
}
}
|
RobotsTextPage
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java
|
{
"start": 1549,
"end": 12598
}
|
class ____ implements LenientlyParsedTrainedModel, StrictlyParsedTrainedModel {
private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(Ensemble.class);
// TODO should we have regression/classification sub-classes that accept the builder?
public static final ParseField NAME = new ParseField("ensemble");
public static final ParseField FEATURE_NAMES = new ParseField("feature_names");
public static final ParseField TRAINED_MODELS = new ParseField("trained_models");
public static final ParseField AGGREGATE_OUTPUT = new ParseField("aggregate_output");
public static final ParseField CLASSIFICATION_LABELS = new ParseField("classification_labels");
public static final ParseField CLASSIFICATION_WEIGHTS = new ParseField("classification_weights");
private static final ObjectParser<Ensemble.Builder, Void> LENIENT_PARSER = createParser(true);
private static final ObjectParser<Ensemble.Builder, Void> STRICT_PARSER = createParser(false);
private static ObjectParser<Ensemble.Builder, Void> createParser(boolean lenient) {
ObjectParser<Ensemble.Builder, Void> parser = new ObjectParser<>(
NAME.getPreferredName(),
lenient,
Ensemble.Builder::builderForParser
);
parser.declareStringArray(Ensemble.Builder::setFeatureNames, FEATURE_NAMES);
parser.declareNamedObjects(
Ensemble.Builder::setTrainedModels,
(p, c, n) -> lenient
? p.namedObject(LenientlyParsedTrainedModel.class, n, null)
: p.namedObject(StrictlyParsedTrainedModel.class, n, null),
(ensembleBuilder) -> ensembleBuilder.setModelsAreOrdered(true),
TRAINED_MODELS
);
parser.declareNamedObject(
Ensemble.Builder::setOutputAggregator,
(p, c, n) -> lenient
? p.namedObject(LenientlyParsedOutputAggregator.class, n, null)
: p.namedObject(StrictlyParsedOutputAggregator.class, n, null),
AGGREGATE_OUTPUT
);
parser.declareString(Ensemble.Builder::setTargetType, TargetType.TARGET_TYPE);
parser.declareStringArray(Ensemble.Builder::setClassificationLabels, CLASSIFICATION_LABELS);
parser.declareDoubleArray(Ensemble.Builder::setClassificationWeights, CLASSIFICATION_WEIGHTS);
return parser;
}
public static Ensemble fromXContentStrict(XContentParser parser) {
return STRICT_PARSER.apply(parser, null).build();
}
public static Ensemble fromXContentLenient(XContentParser parser) {
return LENIENT_PARSER.apply(parser, null).build();
}
private final List<String> featureNames;
public List<TrainedModel> getModels() {
return models;
}
private final List<TrainedModel> models;
private final OutputAggregator outputAggregator;
private final TargetType targetType;
private final List<String> classificationLabels;
private final double[] classificationWeights;
Ensemble(
List<String> featureNames,
List<TrainedModel> models,
OutputAggregator outputAggregator,
TargetType targetType,
@Nullable List<String> classificationLabels,
@Nullable double[] classificationWeights
) {
this.featureNames = Collections.unmodifiableList(ExceptionsHelper.requireNonNull(featureNames, FEATURE_NAMES));
this.models = Collections.unmodifiableList(ExceptionsHelper.requireNonNull(models, TRAINED_MODELS));
this.outputAggregator = ExceptionsHelper.requireNonNull(outputAggregator, AGGREGATE_OUTPUT);
this.targetType = ExceptionsHelper.requireNonNull(targetType, TargetType.TARGET_TYPE);
this.classificationLabels = classificationLabels == null ? null : Collections.unmodifiableList(classificationLabels);
this.classificationWeights = classificationWeights == null
? null
: Arrays.copyOf(classificationWeights, classificationWeights.length);
}
public Ensemble(StreamInput in) throws IOException {
this.featureNames = in.readCollectionAsImmutableList(StreamInput::readString);
this.models = Collections.unmodifiableList(in.readNamedWriteableCollectionAsList(TrainedModel.class));
this.outputAggregator = in.readNamedWriteable(OutputAggregator.class);
this.targetType = TargetType.fromStream(in);
if (in.readBoolean()) {
this.classificationLabels = in.readStringCollectionAsList();
} else {
this.classificationLabels = null;
}
if (in.readBoolean()) {
this.classificationWeights = in.readDoubleArray();
} else {
this.classificationWeights = null;
}
}
@Override
public TargetType targetType() {
return targetType;
}
@Override
public String getWriteableName() {
return NAME.getPreferredName();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeStringCollection(featureNames);
out.writeNamedWriteableCollection(models);
out.writeNamedWriteable(outputAggregator);
targetType.writeTo(out);
out.writeBoolean(classificationLabels != null);
if (classificationLabels != null) {
out.writeStringCollection(classificationLabels);
}
out.writeBoolean(classificationWeights != null);
if (classificationWeights != null) {
out.writeDoubleArray(classificationWeights);
}
}
@Override
public String getName() {
return NAME.getPreferredName();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (featureNames.isEmpty() == false) {
builder.field(FEATURE_NAMES.getPreferredName(), featureNames);
}
NamedXContentObjectHelper.writeNamedObjects(builder, params, true, TRAINED_MODELS.getPreferredName(), models);
NamedXContentObjectHelper.writeNamedObjects(
builder,
params,
false,
AGGREGATE_OUTPUT.getPreferredName(),
Collections.singletonList(outputAggregator)
);
builder.field(TargetType.TARGET_TYPE.getPreferredName(), targetType.toString());
if (classificationLabels != null) {
builder.field(CLASSIFICATION_LABELS.getPreferredName(), classificationLabels);
}
if (classificationWeights != null) {
builder.field(CLASSIFICATION_WEIGHTS.getPreferredName(), classificationWeights);
}
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Ensemble that = (Ensemble) o;
return Objects.equals(featureNames, that.featureNames)
&& Objects.equals(models, that.models)
&& Objects.equals(targetType, that.targetType)
&& Objects.equals(classificationLabels, that.classificationLabels)
&& Objects.equals(outputAggregator, that.outputAggregator)
&& Arrays.equals(classificationWeights, that.classificationWeights);
}
@Override
public int hashCode() {
return Objects.hash(
featureNames,
models,
outputAggregator,
targetType,
classificationLabels,
Arrays.hashCode(classificationWeights)
);
}
@Override
public void validate() {
if (this.models.isEmpty()) {
throw ExceptionsHelper.badRequestException("[{}] must not be empty", TRAINED_MODELS.getPreferredName());
}
if (outputAggregator.compatibleWith(targetType) == false) {
throw ExceptionsHelper.badRequestException(
"aggregate_output [{}] is not compatible with target_type [{}]",
this.targetType,
outputAggregator.getName()
);
}
if (outputAggregator.expectedValueSize() != null && outputAggregator.expectedValueSize() != models.size()) {
throw ExceptionsHelper.badRequestException(
"[{}] expects value array of size [{}] but number of models is [{}]",
AGGREGATE_OUTPUT.getPreferredName(),
outputAggregator.expectedValueSize(),
models.size()
);
}
if ((this.classificationLabels != null || this.classificationWeights != null) && (this.targetType != TargetType.CLASSIFICATION)) {
throw ExceptionsHelper.badRequestException(
"[target_type] should be [classification] if [classification_labels] or [classification_weights] are provided"
);
}
if (classificationWeights != null && classificationLabels != null && classificationWeights.length != classificationLabels.size()) {
throw ExceptionsHelper.badRequestException(
"[classification_weights] and [classification_labels] should be the same length if both are provided"
);
}
this.models.forEach(TrainedModel::validate);
}
@Override
public long estimatedNumOperations() {
OptionalDouble avg = models.stream().mapToLong(TrainedModel::estimatedNumOperations).average();
assert avg.isPresent() : "unexpected null when calculating number of operations";
// Average operations for each model and the operations required for processing and aggregating with the outputAggregator
return (long) Math.ceil(avg.getAsDouble()) + 2 * (models.size() - 1);
}
public static Builder builder() {
return new Builder();
}
@Override
public long ramBytesUsed() {
long size = SHALLOW_SIZE;
size += RamUsageEstimator.sizeOfCollection(featureNames);
size += RamUsageEstimator.sizeOfCollection(classificationLabels);
size += RamUsageEstimator.sizeOfCollection(models);
if (classificationWeights != null) {
size += RamUsageEstimator.sizeOf(classificationWeights);
}
size += outputAggregator.ramBytesUsed();
return size;
}
@Override
public Collection<Accountable> getChildResources() {
List<Accountable> accountables = new ArrayList<>(models.size() + 1);
for (TrainedModel model : models) {
accountables.add(Accountables.namedAccountable(model.getName(), model));
}
accountables.add(Accountables.namedAccountable(outputAggregator.getName(), outputAggregator));
return Collections.unmodifiableCollection(accountables);
}
@Override
public TransportVersion getMinimalCompatibilityVersion() {
return models.stream()
.map(TrainedModel::getMinimalCompatibilityVersion)
.max(TransportVersion::compareTo)
.orElse(TransportVersion.zero());
}
public static
|
Ensemble
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/file/FileAssert_isAbsolute_Test.java
|
{
"start": 879,
"end": 1183
}
|
class ____ extends FileAssertBaseTest {
@Override
protected FileAssert invoke_api_method() {
return assertions.isAbsolute();
}
@Override
protected void verify_internal_effects() {
verify(files).assertIsAbsolute(getInfo(assertions), getActual(assertions));
}
}
|
FileAssert_isAbsolute_Test
|
java
|
alibaba__fastjson
|
src/main/java/com/alibaba/fastjson/serializer/PropertyPreFilter.java
|
{
"start": 653,
"end": 784
}
|
interface ____ extends SerializeFilter {
boolean apply(JSONSerializer serializer, Object object, String name);
}
|
PropertyPreFilter
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
|
{
"start": 18178,
"end": 19483
}
|
interface ____ {
String PREFIX = HdfsClientConfigKeys.PREFIX + "failover.";
String PROXY_PROVIDER_KEY_PREFIX = PREFIX + "proxy.provider";
String MAX_ATTEMPTS_KEY = PREFIX + "max.attempts";
int MAX_ATTEMPTS_DEFAULT = 15;
String SLEEPTIME_BASE_KEY = PREFIX + "sleep.base.millis";
int SLEEPTIME_BASE_DEFAULT = 500;
String SLEEPTIME_MAX_KEY = PREFIX + "sleep.max.millis";
int SLEEPTIME_MAX_DEFAULT = 15000;
String CONNECTION_RETRIES_KEY = PREFIX + "connection.retries";
int CONNECTION_RETRIES_DEFAULT = 0;
String CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY =
PREFIX + "connection.retries.on.timeouts";
int CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
String RANDOM_ORDER = PREFIX + "random.order";
boolean RANDOM_ORDER_DEFAULT = true;
String RESOLVE_ADDRESS_NEEDED_KEY = PREFIX + "resolve-needed";
boolean RESOLVE_ADDRESS_NEEDED_DEFAULT = false;
String RESOLVE_SERVICE_KEY = PREFIX + "resolver.impl";
String RESOLVE_ADDRESS_TO_FQDN = PREFIX + "resolver.useFQDN";
boolean RESOLVE_ADDRESS_TO_FQDN_DEFAULT = true;
String DFS_CLIENT_LAZY_RESOLVED = PREFIX + "lazy.resolved";
boolean DFS_CLIENT_LAZY_RESOLVED_DEFAULT = false;
}
/** dfs.client.write configuration properties */
|
Failover
|
java
|
google__guava
|
android/guava/src/com/google/common/collect/DescendingMultiset.java
|
{
"start": 3603,
"end": 4462
}
|
class ____ extends Multisets.EntrySet<E> {
@Override
Multiset<E> multiset() {
return DescendingMultiset.this;
}
@Override
public Iterator<Entry<E>> iterator() {
return entryIterator();
}
@Override
public int size() {
return forwardMultiset().entrySet().size();
}
}
return new EntrySetImpl();
}
@Override
public Iterator<E> iterator() {
return Multisets.iteratorImpl(this);
}
@Override
public @Nullable Object[] toArray() {
return standardToArray();
}
@Override
@SuppressWarnings("nullness") // b/192354773 in our checker affects toArray declarations
public <T extends @Nullable Object> T[] toArray(T[] array) {
return standardToArray(array);
}
@Override
public String toString() {
return entrySet().toString();
}
}
|
EntrySetImpl
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/main/java/org/springframework/web/reactive/function/server/ServerResponse.java
|
{
"start": 12186,
"end": 17802
}
|
interface ____ extends HeadersBuilder<BodyBuilder> {
/**
* Set the length of the body in bytes, as specified by the
* {@code Content-Length} header.
* @param contentLength the content length
* @return this builder
* @see HttpHeaders#setContentLength(long)
*/
BodyBuilder contentLength(long contentLength);
/**
* Set the {@linkplain MediaType media type} of the body, as specified by the
* {@code Content-Type} header.
* @param contentType the content type
* @return this builder
* @see HttpHeaders#setContentType(MediaType)
*/
BodyBuilder contentType(MediaType contentType);
/**
* Add a serialization hint like {@link JacksonCodecSupport#JSON_VIEW_HINT}
* to customize how the body will be serialized.
* @param key the hint key
* @param value the hint value
*/
BodyBuilder hint(String key, Object value);
/**
* Customize the serialization hints with the given consumer.
* @param hintsConsumer a function that consumes the hints
* @return this builder
* @since 5.1.6
*/
BodyBuilder hints(Consumer<Map<String, Object>> hintsConsumer);
/**
* Set the body of the response to the given {@code Object} and return it.
* This is a shortcut for using a {@link #body(BodyInserter)} with a
* {@linkplain BodyInserters#fromValue value inserter}.
* @param body the body of the response
* @return the built response
* @throws IllegalArgumentException if {@code body} is a
* {@link Publisher} or producer known to {@link ReactiveAdapterRegistry}
* @since 5.2
*/
Mono<ServerResponse> bodyValue(Object body);
/**
* Set the body of the response to the given {@code Object} and return it.
* This is a shortcut for using a {@link #body(BodyInserter)} with a
* {@linkplain BodyInserters#fromValue value inserter}.
* @param body the body of the response
* @param bodyType the type of the body, used to capture the generic type
* @param <T> the type of the body
* @return the built response
* @throws IllegalArgumentException if {@code body} is a
* {@link Publisher} or producer known to {@link ReactiveAdapterRegistry}
* @since 6.2
*/
<T> Mono<ServerResponse> bodyValue(T body, ParameterizedTypeReference<T> bodyType);
/**
* Set the body from the given {@code Publisher}. Shortcut for
* {@link #body(BodyInserter)} with a
* {@linkplain BodyInserters#fromPublisher Publisher inserter}.
* @param publisher the {@code Publisher} to write to the response
* @param elementClass the type of elements published
* @param <T> the type of the elements contained in the publisher
* @param <P> the type of the {@code Publisher}
* @return the built response
*/
<T, P extends Publisher<T>> Mono<ServerResponse> body(P publisher, Class<T> elementClass);
/**
* Variant of {@link #body(Publisher, Class)} that allows using any
* producer that can be resolved to {@link Publisher} via
* {@link ReactiveAdapterRegistry}.
* @param publisher the {@code Publisher} to use to write the response
* @param elementTypeRef the type of elements produced
* @param <T> the type of the elements contained in the publisher
* @param <P> the type of the {@code Publisher}
* @return the built response
*/
<T, P extends Publisher<T>> Mono<ServerResponse> body(P publisher,
ParameterizedTypeReference<T> elementTypeRef);
/**
* Variant of {@link #body(Publisher, Class)} that allows using any
* producer that can be resolved to {@link Publisher} via
* {@link ReactiveAdapterRegistry}.
* @param producer the producer to write to the request
* @param elementClass the type of elements produced
* @return the built response
* @since 5.2
*/
Mono<ServerResponse> body(Object producer, Class<?> elementClass);
/**
* Variant of {@link #body(Publisher, ParameterizedTypeReference)} that
* allows using any producer that can be resolved to {@link Publisher}
* via {@link ReactiveAdapterRegistry}.
* @param producer the producer to write to the response
* @param elementTypeRef the type of elements produced
* @return the built response
* @since 5.2
*/
Mono<ServerResponse> body(Object producer, ParameterizedTypeReference<?> elementTypeRef);
/**
* Set the body of the response to the given {@code BodyInserter} and return it.
* @param inserter the {@code BodyInserter} that writes to the response
* @return the built response
*/
Mono<ServerResponse> body(BodyInserter<?, ? super ServerHttpResponse> inserter);
/**
* Render the template with the given {@code name} using the given {@code modelAttributes}.
* The model attributes are mapped under a
* {@linkplain org.springframework.core.Conventions#getVariableName generated name}.
* <p><em>Note: Empty {@link Collection Collections} are not added to
* the model when using this method because we cannot correctly determine
* the true convention name.</em>
* @param name the name of the template to be rendered
* @param modelAttributes the modelAttributes used to render the template
* @return the built response
*/
Mono<ServerResponse> render(String name, Object... modelAttributes);
/**
* Render the template with the given {@code name} using the given {@code model}.
* @param name the name of the template to be rendered
* @param model the model used to render the template
* @return the built response
*/
Mono<ServerResponse> render(String name, Map<String, ?> model);
}
/**
* Defines the context used during the {@link #writeTo(ServerWebExchange, Context)}.
*/
|
BodyBuilder
|
java
|
elastic__elasticsearch
|
modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java
|
{
"start": 1283,
"end": 4843
}
|
class ____ extends ESTestCase {
protected static final List<Whitelist> PAINLESS_BASE_WHITELIST = PainlessPlugin.baseWhiteList();
protected PainlessScriptEngine scriptEngine;
@Before
public void setup() {
scriptEngine = new PainlessScriptEngine(scriptEngineSettings(), scriptContexts());
}
/**
* Settings used to build the script engine. Override to customize settings like {@link RegexTests} does to enable regexes.
*/
protected Settings scriptEngineSettings() {
return Settings.EMPTY;
}
/**
* Script contexts used to build the script engine. Override to customize which script contexts are available.
*/
protected Map<ScriptContext<?>, List<Whitelist>> scriptContexts() {
Map<ScriptContext<?>, List<Whitelist>> contexts = new HashMap<>();
List<Whitelist> whitelists = new ArrayList<>(PAINLESS_BASE_WHITELIST);
whitelists.add(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.painless.test"));
contexts.put(PainlessTestScript.CONTEXT, whitelists);
return contexts;
}
/** Compiles and returns the result of {@code script} */
public Object exec(String script) {
return exec(script, null, true);
}
/** Compiles and returns the result of {@code script} with access to {@code picky} */
public Object exec(String script, boolean picky) {
return exec(script, null, picky);
}
/** Compiles and returns the result of {@code script} with access to {@code vars} */
public Object exec(String script, Map<String, Object> vars, boolean picky) {
return exec(script, vars, Map.of(CompilerSettings.INITIAL_CALL_SITE_DEPTH, random().nextBoolean() ? "0" : "10"), picky);
}
/** Compiles and returns the result of {@code script} with access to {@code vars} and compile-time parameters */
public Object exec(String script, Map<String, Object> vars, Map<String, String> compileParams, boolean picky) {
// test for ambiguity errors before running the actual script if picky is true
if (picky) {
CompilerSettings pickySettings = new CompilerSettings();
pickySettings.setPicky(true);
pickySettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(scriptEngineSettings()));
Walker.buildPainlessTree(getTestName(), script, pickySettings);
}
// test actual script execution
PainlessTestScript.Factory factory = scriptEngine.compile(null, script, PainlessTestScript.CONTEXT, compileParams);
PainlessTestScript testScript = factory.newInstance(vars == null ? Collections.emptyMap() : vars);
return testScript.execute();
}
/**
* Uses the {@link Debugger} to get the bytecode output for a script and compare
* it against an expected bytecode passed in as a String.
*/
public void assertBytecodeExists(String script, String bytecode) {
final String asm = Debugger.toString(script);
assertTrue("bytecode not found, got: \n" + asm, asm.contains(bytecode));
}
/**
* Uses the {@link Debugger} to get the bytecode output for a script and compare
* it against an expected bytecode pattern as a regular expression (please try to avoid!)
*/
public void assertBytecodeHasPattern(String script, String pattern) {
final String asm = Debugger.toString(script);
assertTrue("bytecode not found, got: \n" + asm, asm.matches(pattern));
}
/** Checks a specific exception
|
ScriptTestCase
|
java
|
quarkusio__quarkus
|
independent-projects/qute/core/src/main/java/io/quarkus/qute/Parser.java
|
{
"start": 53761,
"end": 54283
}
|
class ____ implements TemplateNode {
@Override
public CompletionStage<ResultNode> resolve(ResolutionContext context) {
throw new IllegalStateException();
}
@Override
public Kind kind() {
throw new UnsupportedOperationException();
}
@Override
public Origin getOrigin() {
throw new IllegalStateException();
}
}
// A dummy node for comments, it's only used when removing standalone lines
static
|
BlockNode
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/src/main/java/org/apache/hadoop/applications/mawo/server/common/TeardownTask.java
|
{
"start": 931,
"end": 1567
}
|
class ____ extends SimpleTask {
/**
* Teardown task default constructor.
*/
public TeardownTask() {
super();
setTaskType(TaskType.TEARDOWN);
}
/**
* Teardown Task constructor.
* @param taskId : Teardown task Id
* @param environment : Environment map for teardown task
* @param taskCMD : Teardown task command
* @param timeout : Timeout for Teardown task
*/
public TeardownTask(final TaskId taskId,
final Map<String, String> environment,
final String taskCMD, final long timeout) {
super(taskId, environment, taskCMD, timeout);
setTaskType(TaskType.TEARDOWN);
}
}
|
TeardownTask
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.