language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
grpc__grpc-java
|
gcp-observability/src/main/java/io/grpc/gcp/observability/interceptors/InternalLoggingChannelInterceptor.java
|
{
"start": 2159,
"end": 9396
}
|
class ____ implements Factory {
private final LogHelper helper;
private final ConfigFilterHelper filterHelper;
/**
* Create the {@link Factory} we need to create our {@link ClientInterceptor}s.
*/
public FactoryImpl(LogHelper helper, ConfigFilterHelper filterHelper) {
this.helper = helper;
this.filterHelper = filterHelper;
}
@Override
public ClientInterceptor create() {
return new InternalLoggingChannelInterceptor(helper, filterHelper);
}
}
private InternalLoggingChannelInterceptor(LogHelper helper, ConfigFilterHelper filterHelper) {
this.helper = helper;
this.filterHelper = filterHelper;
}
@Override
public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(MethodDescriptor<ReqT, RespT> method,
CallOptions callOptions, Channel next) {
final AtomicLong seq = new AtomicLong(1);
final String callId = UUID.randomUUID().toString();
final String authority = next.authority();
final String serviceName = method.getServiceName();
final String methodName = method.getBareMethodName();
// Get the stricter deadline to calculate the timeout once the call starts
final Deadline deadline = LogHelper.min(callOptions.getDeadline(),
Context.current().getDeadline());
final SpanContext clientSpanContext = callOptions.getOption(CLIENT_TRACE_SPAN_CONTEXT_KEY);
FilterParams filterParams = filterHelper.logRpcMethod(method.getFullMethodName(), true);
if (!filterParams.log()) {
return next.newCall(method, callOptions);
}
final int maxHeaderBytes = filterParams.headerBytes();
final int maxMessageBytes = filterParams.messageBytes();
return new SimpleForwardingClientCall<ReqT, RespT>(next.newCall(method, callOptions)) {
@Override
public void start(Listener<RespT> responseListener, Metadata headers) {
// Event: EventType.CLIENT_HEADER
// The timeout should reflect the time remaining when the call is started, so compute
// remaining time here.
final Duration timeout = deadline == null ? null
: Durations.fromNanos(deadline.timeRemaining(TimeUnit.NANOSECONDS));
try {
helper.logClientHeader(
seq.getAndIncrement(),
serviceName,
methodName,
authority,
timeout,
headers,
maxHeaderBytes,
EventLogger.CLIENT,
callId,
null,
clientSpanContext);
} catch (Exception e) {
// Catching generic exceptions instead of specific ones for all the events.
// This way we can catch both expected and unexpected exceptions instead of re-throwing
// exceptions to callers which will lead to RPC getting aborted.
// Expected exceptions to be caught:
// 1. IllegalArgumentException
// 2. NullPointerException
logger.log(Level.SEVERE, "Unable to log request header", e);
}
Listener<RespT> observabilityListener =
new SimpleForwardingClientCallListener<RespT>(responseListener) {
@Override
public void onMessage(RespT message) {
// Event: EventType.SERVER_MESSAGE
try {
helper.logRpcMessage(
seq.getAndIncrement(),
serviceName,
methodName,
authority,
EventType.SERVER_MESSAGE,
message,
maxMessageBytes,
EventLogger.CLIENT,
callId,
clientSpanContext);
} catch (Exception e) {
logger.log(Level.SEVERE, "Unable to log response message", e);
}
super.onMessage(message);
}
@Override
public void onHeaders(Metadata headers) {
// Event: EventType.SERVER_HEADER
try {
helper.logServerHeader(
seq.getAndIncrement(),
serviceName,
methodName,
authority,
headers,
maxHeaderBytes,
EventLogger.CLIENT,
callId,
LogHelper.getPeerAddress(getAttributes()),
clientSpanContext);
} catch (Exception e) {
logger.log(Level.SEVERE, "Unable to log response header", e);
}
super.onHeaders(headers);
}
@Override
public void onClose(Status status, Metadata trailers) {
// Event: EventType.SERVER_TRAILER
try {
helper.logTrailer(
seq.getAndIncrement(),
serviceName,
methodName,
authority,
status,
trailers,
maxHeaderBytes,
EventLogger.CLIENT,
callId,
LogHelper.getPeerAddress(getAttributes()),
clientSpanContext);
} catch (Exception e) {
logger.log(Level.SEVERE, "Unable to log trailer", e);
}
super.onClose(status, trailers);
}
};
super.start(observabilityListener, headers);
}
@Override
public void sendMessage(ReqT message) {
// Event: EventType.CLIENT_MESSAGE
try {
helper.logRpcMessage(
seq.getAndIncrement(),
serviceName,
methodName,
authority,
EventType.CLIENT_MESSAGE,
message,
maxMessageBytes,
EventLogger.CLIENT,
callId,
clientSpanContext);
} catch (Exception e) {
logger.log(Level.SEVERE, "Unable to log request message", e);
}
super.sendMessage(message);
}
@Override
public void halfClose() {
// Event: EventType.CLIENT_HALF_CLOSE
try {
helper.logHalfClose(
seq.getAndIncrement(),
serviceName,
methodName,
authority,
EventLogger.CLIENT,
callId,
clientSpanContext);
} catch (Exception e) {
logger.log(Level.SEVERE, "Unable to log half close", e);
}
super.halfClose();
}
@Override
public void cancel(String message, Throwable cause) {
// Event: EventType.CANCEL
try {
helper.logCancel(
seq.getAndIncrement(),
serviceName,
methodName,
authority,
EventLogger.CLIENT,
callId,
clientSpanContext);
} catch (Exception e) {
logger.log(Level.SEVERE, "Unable to log cancel", e);
}
super.cancel(message, cause);
}
};
}
}
|
FactoryImpl
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/DeleteQueryRulesetAction.java
|
{
"start": 1177,
"end": 1463
}
|
class ____ {
public static final String NAME = "cluster:admin/xpack/query_rules/delete";
public static final ActionType<AcknowledgedResponse> INSTANCE = new ActionType<>(NAME);
private DeleteQueryRulesetAction() {/* no instances */}
public static
|
DeleteQueryRulesetAction
|
java
|
quarkusio__quarkus
|
extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/runtime/VertxInputStream.java
|
{
"start": 800,
"end": 5287
}
|
class ____ extends InputStream {
public static final String CONTINUE = "100-continue";
private final VertxBlockingInput exchange;
private boolean closed;
private boolean finished;
private ByteBuf pooled;
private final long limit;
private ContinueState continueState = ContinueState.NONE;
public VertxInputStream(RoutingContext request, long timeout) {
this.exchange = new VertxBlockingInput(request.request(), timeout);
Long limitObj = request.get(VertxHttpRecorder.MAX_REQUEST_SIZE_KEY);
if (limitObj == null) {
limit = -1;
} else {
limit = limitObj;
}
String expect = request.request().getHeader(HttpHeaderNames.EXPECT);
if (expect != null && expect.equalsIgnoreCase(CONTINUE)) {
continueState = ContinueState.REQUIRED;
}
}
public VertxInputStream(RoutingContext request, long timeout, ByteBuf existing) {
this.exchange = new VertxBlockingInput(request.request(), timeout);
Long limitObj = request.get(VertxHttpRecorder.MAX_REQUEST_SIZE_KEY);
if (limitObj == null) {
limit = -1;
} else {
limit = limitObj;
}
this.pooled = existing;
}
@Override
public int read() throws IOException {
byte[] b = new byte[1];
int read = read(b);
if (read == -1) {
return -1;
}
return b[0] & 0xff;
}
@Override
public int read(final byte[] b) throws IOException {
return read(b, 0, b.length);
}
@Override
public int read(final byte[] b, final int off, final int len) throws IOException {
if (closed) {
throw new IOException("Stream is closed");
}
if (continueState == ContinueState.REQUIRED) {
continueState = ContinueState.SENT;
exchange.request.response().writeContinue();
}
readIntoBuffer();
if (limit > 0 && exchange.request.bytesRead() > limit) {
HttpServerResponse response = exchange.request.response();
if (response.headWritten()) {
//the response has been written, not much we can do
exchange.request.connection().close();
throw new IOException("Request too large");
} else {
response.setStatusCode(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE.code());
response.headers().add(HttpHeaderNames.CONNECTION, "close");
response.endHandler(new Handler<Void>() {
@Override
public void handle(Void event) {
exchange.request.connection().close();
}
});
response.end();
throw new IOException("Request too large");
}
}
if (finished) {
return -1;
}
if (len == 0) {
return 0;
}
ByteBuf buffer = pooled;
int copied = Math.min(len, buffer.readableBytes());
buffer.readBytes(b, off, copied);
if (!buffer.isReadable()) {
pooled.release();
pooled = null;
}
return copied;
}
private void readIntoBuffer() throws IOException {
if (pooled == null && !finished) {
pooled = exchange.readBlocking();
if (pooled == null) {
finished = true;
pooled = null;
}
}
}
@Override
public int available() throws IOException {
if (closed) {
throw new IOException("Stream is closed");
}
if (finished) {
return 0;
}
return exchange.readBytesAvailable();
}
@Override
public void close() throws IOException {
if (closed) {
return;
}
closed = true;
try {
while (!finished) {
readIntoBuffer();
if (pooled != null) {
pooled.release();
pooled = null;
}
}
} catch (IOException | RuntimeException e) {
//our exchange is all broken, just end it
throw e;
} finally {
if (pooled != null) {
pooled.release();
pooled = null;
}
finished = true;
}
}
public static
|
VertxInputStream
|
java
|
spring-projects__spring-framework
|
spring-r2dbc/src/main/java/org/springframework/r2dbc/core/binding/Bindings.java
|
{
"start": 1272,
"end": 4255
}
|
class ____ implements Iterable<Bindings.Binding> {
private static final Bindings EMPTY = new Bindings();
private final Map<BindMarker, Binding> bindings;
/**
* Create empty {@link Bindings}.
*/
public Bindings() {
this.bindings = Collections.emptyMap();
}
/**
* Create {@link Bindings} from the given collection.
* @param bindings a collection of {@link Binding} objects
*/
public Bindings(Collection<Binding> bindings) {
Assert.notNull(bindings, "Collection must not be null");
Map<BindMarker, Binding> mapping = CollectionUtils.newLinkedHashMap(bindings.size());
bindings.forEach(binding -> mapping.put(binding.getBindMarker(), binding));
this.bindings = mapping;
}
Bindings(Map<BindMarker, Binding> bindings) {
this.bindings = bindings;
}
protected Map<BindMarker, Binding> getBindings() {
return this.bindings;
}
/**
* Merge this bindings with an other {@link Bindings} object and
* create a new merged {@link Bindings} object.
* @param other the object to merge with
* @return a newly merged {@link Bindings} object
*/
public Bindings and(Bindings other) {
return merge(this, other);
}
/**
* Apply the bindings to a {@link BindTarget}.
* @param bindTarget the target to apply bindings to
*/
public void apply(BindTarget bindTarget) {
Assert.notNull(bindTarget, "BindTarget must not be null");
this.bindings.forEach((marker, binding) -> binding.apply(bindTarget));
}
/**
* Perform the given action for each binding of this {@link Bindings} until all
* bindings have been processed or the action throws an exception. Actions are
* performed in the order of iteration (if an iteration order is specified).
* Exceptions thrown by the action are relayed to the
* @param action the action to be performed for each {@link Binding}
*/
@Override
public void forEach(Consumer<? super Binding> action) {
this.bindings.forEach((marker, binding) -> action.accept(binding));
}
@Override
public Iterator<Binding> iterator() {
return this.bindings.values().iterator();
}
@Override
public Spliterator<Binding> spliterator() {
return this.bindings.values().spliterator();
}
/**
* Return an empty {@link Bindings} object.
*/
public static Bindings empty() {
return EMPTY;
}
/**
* Merge this bindings with an other {@link Bindings} object and
* create a new merged {@link Bindings} object.
* @param left the left object to merge with
* @param right the right object to merge with
* @return a newly merged {@link Bindings} object
*/
public static Bindings merge(Bindings left, Bindings right) {
Assert.notNull(left, "Left side Bindings must not be null");
Assert.notNull(right, "Right side Bindings must not be null");
List<Binding> result = new ArrayList<>(left.getBindings().size() + right.getBindings().size());
result.addAll(left.getBindings().values());
result.addAll(right.getBindings().values());
return new Bindings(result);
}
/**
* Base
|
Bindings
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ObserveableOp.java
|
{
"start": 1075,
"end": 1143
}
|
class ____ extends Operation {
/**
* The observation
|
ObserveableOp
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java
|
{
"start": 2351,
"end": 8149
}
|
class ____ extends AbstractS3ACostTest {
private static final Logger LOG =
LoggerFactory.getLogger(ITestXAttrCost.class);
private static final int GET_METADATA_ON_OBJECT = 1;
private static final int GET_METADATA_ON_DIR = GET_METADATA_ON_OBJECT * 2;
@Test
public void testXAttrRoot() throws Throwable {
describe("Test xattr on root");
Path root = new Path("/");
S3AFileSystem fs = getFileSystem();
Map<String, byte[]> xAttrs = verifyMetrics(
() -> fs.getXAttrs(root),
with(INVOCATION_XATTR_GET_MAP, GET_METADATA_ON_OBJECT));
logXAttrs(xAttrs);
List<String> headerList = verifyMetrics(() ->
fs.listXAttrs(root),
with(INVOCATION_OP_XATTR_LIST, GET_METADATA_ON_OBJECT));
// don't make any assertions on the headers entries
// as different S3 providers may have different headers
// and they may even change over time.
Assertions.assertThat(headerList)
.describedAs("Headers on root object")
.hasSize(xAttrs.size());
}
/**
* Log the attributes as strings.
* @param xAttrs map of attributes
*/
private void logXAttrs(final Map<String, byte[]> xAttrs) {
xAttrs.forEach((k, v) ->
LOG.info("{} has bytes[{}] => \"{}\"",
k, v.length, decodeBytes(v)));
}
@Test
public void testXAttrFile() throws Throwable {
describe("Test xattr on a file");
Path testFile = methodPath();
S3AFileSystem fs = getFileSystem();
boolean createPerformance = isCreatePerformanceEnabled(fs);
create(testFile, true,
createPerformance ? NO_HEAD_OR_LIST : CREATE_FILE_OVERWRITE);
Map<String, byte[]> xAttrs = verifyMetrics(() ->
fs.getXAttrs(testFile),
with(INVOCATION_XATTR_GET_MAP, GET_METADATA_ON_OBJECT));
logXAttrs(xAttrs);
assertHeaderEntry(xAttrs, XA_CONTENT_LENGTH)
.isEqualTo("0");
// get the list of supported headers
List<String> headerList = verifyMetrics(
() -> fs.listXAttrs(testFile),
with(INVOCATION_OP_XATTR_LIST, GET_METADATA_ON_OBJECT));
// verify this contains all the standard markers,
// but not the magic marker header
Assertions.assertThat(headerList)
.describedAs("Supported headers")
.containsAnyElementsOf(Arrays.asList(XA_STANDARD_HEADERS));
// ask for one header and validate its value
byte[] bytes = verifyMetrics(() ->
fs.getXAttr(testFile, XA_CONTENT_LENGTH),
with(INVOCATION_XATTR_GET_NAMED, GET_METADATA_ON_OBJECT));
assertHeader(XA_CONTENT_LENGTH, bytes)
.isEqualTo("0");
assertHeaderEntry(xAttrs, XA_CONTENT_TYPE)
.isEqualTo(CONTENT_TYPE_OCTET_STREAM);
}
/**
* Directory attributes can be retrieved, but they take two HEAD requests.
* @throws Throwable
*/
@Test
public void testXAttrDir() throws Throwable {
describe("Test xattr on a dir");
S3AFileSystem fs = getFileSystem();
Path dir = methodPath();
fs.mkdirs(dir);
Map<String, byte[]> xAttrs = verifyMetrics(() ->
fs.getXAttrs(dir),
with(INVOCATION_XATTR_GET_MAP, GET_METADATA_ON_DIR));
logXAttrs(xAttrs);
assertHeaderEntry(xAttrs, XA_CONTENT_LENGTH)
.isEqualTo("0");
// get the list of supported headers
List<String> headerList = verifyMetrics(
() -> fs.listXAttrs(dir),
with(INVOCATION_OP_XATTR_LIST, GET_METADATA_ON_DIR));
// verify this contains all the standard markers,
// but not the magic marker header
Assertions.assertThat(headerList)
.describedAs("Supported headers")
.containsAnyElementsOf(Arrays.asList(XA_STANDARD_HEADERS));
// ask for one header and validate its value
byte[] bytes = verifyMetrics(() ->
fs.getXAttr(dir, XA_CONTENT_LENGTH),
with(INVOCATION_XATTR_GET_NAMED, GET_METADATA_ON_DIR));
assertHeader(XA_CONTENT_LENGTH, bytes)
.isEqualTo("0");
assertHeaderEntry(xAttrs, XA_CONTENT_TYPE)
.isEqualTo(CONTENT_TYPE_X_DIRECTORY);
}
/**
* When the operations are called on a missing path, FNFE is
* raised and only one attempt is made to retry the operation.
*/
@Test
public void testXAttrMissingFile() throws Throwable {
describe("Test xattr on a missing path");
Path testFile = methodPath();
S3AFileSystem fs = getFileSystem();
int getMetadataOnMissingFile = GET_METADATA_ON_DIR;
verifyMetricsIntercepting(FileNotFoundException.class, "", () ->
fs.getXAttrs(testFile),
with(INVOCATION_XATTR_GET_MAP, getMetadataOnMissingFile));
verifyMetricsIntercepting(FileNotFoundException.class, "", () ->
fs.getXAttr(testFile, XA_CONTENT_LENGTH),
with(INVOCATION_XATTR_GET_NAMED, getMetadataOnMissingFile));
verifyMetricsIntercepting(FileNotFoundException.class, "", () ->
fs.listXAttrs(testFile),
with(INVOCATION_OP_XATTR_LIST, getMetadataOnMissingFile));
}
/**
* Generate an assert on a named header in the map.
* @param xAttrs attribute map
* @param key header key
* @return the assertion
*/
private AbstractStringAssert<?> assertHeaderEntry(
Map<String, byte[]> xAttrs, String key) {
return assertHeader(key, xAttrs.get(key));
}
/**
* Create an assertion on the header; check for the bytes
* being non-null/empty and then returns the decoded values
* as a string assert.
* @param key header key (for error)
* @param bytes value
* @return the assertion
*/
private AbstractStringAssert<?> assertHeader(final String key,
final byte[] bytes) {
String decoded = decodeBytes(bytes);
return Assertions.assertThat(decoded)
.describedAs("xattr %s decoded to: %s", key, decoded)
.isNotNull()
.isNotEmpty();
}
}
|
ITestXAttrCost
|
java
|
google__dagger
|
javatests/dagger/hilt/processor/internal/originatingelement/OriginatingElementProcessorTest.java
|
{
"start": 984,
"end": 1182
}
|
class ____ {
@Test
public void originatingElementOnInnerClass_fails() {
Source outer1 =
HiltCompilerTests.javaSource("test.Outer1", "package test;", "", "
|
OriginatingElementProcessorTest
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/params/ParameterizedTestIntegrationTests.java
|
{
"start": 84835,
"end": 85320
}
|
interface ____ {
}
@ParameterizedTest(quoteTextArguments = false)
@TwoCsvFileSources
void testWithRepeatableCsvFileSourceAsMetaAnnotation(String column1, String column2) {
fail("%s %s".formatted(column1, column2));
}
@ParameterizedTest(quoteTextArguments = false)
@CsvSource({ "a" })
@CsvSource({ "b" })
void testWithRepeatableCsvSource(String argument) {
fail(argument);
}
@CsvSource({ "a" })
@CsvSource({ "b" })
@Retention(RUNTIME)
@
|
TwoCsvFileSources
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/jndi/JndiObjectLocator.java
|
{
"start": 1929,
"end": 3671
}
|
class ____ extends JndiLocatorSupport implements InitializingBean {
private @Nullable String jndiName;
private @Nullable Class<?> expectedType;
/**
* Specify the JNDI name to look up. If it doesn't begin with "java:comp/env/"
* this prefix is added automatically if "resourceRef" is set to "true".
* @param jndiName the JNDI name to look up
* @see #setResourceRef
*/
public void setJndiName(@Nullable String jndiName) {
this.jndiName = jndiName;
}
/**
* Return the JNDI name to look up.
*/
public @Nullable String getJndiName() {
return this.jndiName;
}
/**
* Specify the type that the located JNDI object is supposed
* to be assignable to, if any.
*/
public void setExpectedType(@Nullable Class<?> expectedType) {
this.expectedType = expectedType;
}
/**
* Return the type that the located JNDI object is supposed
* to be assignable to, if any.
*/
public @Nullable Class<?> getExpectedType() {
return this.expectedType;
}
@Override
public void afterPropertiesSet() throws IllegalArgumentException, NamingException {
if (!StringUtils.hasLength(getJndiName())) {
throw new IllegalArgumentException("Property 'jndiName' is required");
}
}
/**
* Perform the actual JNDI lookup for this locator's target resource.
* @return the located target object
* @throws NamingException if the JNDI lookup failed or if the
* located JNDI object is not assignable to the expected type
* @see #setJndiName
* @see #setExpectedType
* @see #lookup(String, Class)
*/
protected Object lookup() throws NamingException {
String jndiName = getJndiName();
Assert.state(jndiName != null, "No JNDI name specified");
return lookup(jndiName, getExpectedType());
}
}
|
JndiObjectLocator
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/struct/FormatFeatureUnwrapSingleTest.java
|
{
"start": 1297,
"end": 1466
}
|
class ____ {
@JsonFormat(with={ JsonFormat.Feature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED })
public short[] v = { (short) 7 };
}
static
|
UnwrapShortArray
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/JoinTaskIterator.java
|
{
"start": 1299,
"end": 3154
}
|
interface ____<V1, V2, O> {
/**
* General-purpose open method. Initializes the internal strategy (for example triggers the
* sorting of the inputs or starts building hash tables).
*
* @throws IOException Thrown, if an I/O error occurred while preparing the data. An example is
* a failing external sort.
* @throws MemoryAllocationException Thrown, if the internal strategy could not allocate the
* memory it needs.
* @throws InterruptedException Thrown, if the thread was interrupted during the initialization
* process.
*/
void open() throws IOException, MemoryAllocationException, InterruptedException;
/**
* General-purpose close method. Works after the principle of best effort. The internal
* structures are released, but errors that occur on the way are not reported.
*/
void close();
/**
* Moves the internal pointer to the next key that both inputs share. It calls the match stub
* with the cross product of all values that share the same key.
*
* @param matchFunction The match stub containing the match function which is called with the
* keys.
* @param collector The collector to pass the match function.
* @return True, if a next key exists, false if no more keys exist.
* @throws Exception Exceptions from the user code are forwarded.
*/
boolean callWithNextKey(FlatJoinFunction<V1, V2, O> matchFunction, Collector<O> collector)
throws Exception;
/**
* Aborts the matching process. This extra abort method is supplied, because a significant time
* may pass while calling the match stub with the cross product of all values that share the
* same key. A call to this abort method signals an interrupt to that procedure.
*/
void abort();
}
|
JoinTaskIterator
|
java
|
apache__kafka
|
connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorValidationIntegrationTest.java
|
{
"start": 3159,
"end": 10300
}
|
class ____ {
private static final String WORKER_GROUP_ID = "connect-worker-group-id";
// Use a single embedded cluster for all test cases in order to cut down on runtime
private static EmbeddedConnectCluster connect;
@BeforeAll
public static void setup() {
Map<String, String> workerProps = new HashMap<>();
workerProps.put(GROUP_ID_CONFIG, WORKER_GROUP_ID);
TestPlugins.TestPlugin[] testPlugins = new TestPlugins.TestPlugin[] {
TestPlugins.TestPlugin.BAD_PACKAGING_DEFAULT_CONSTRUCTOR_THROWS_CONVERTER,
TestPlugins.TestPlugin.BAD_PACKAGING_DEFAULT_CONSTRUCTOR_THROWS_CONVERTER,
TestPlugins.TestPlugin.BAD_PACKAGING_INNOCUOUS_CONNECTOR
};
workerProps.put(
WorkerConfig.PLUGIN_PATH_CONFIG,
TestPlugins.pluginPathJoined(testPlugins)
);
connect = new EmbeddedConnectCluster.Builder()
.name("connector-validation-connect-cluster")
.workerProps(workerProps)
.numBrokers(1)
.numWorkers(1)
.build();
connect.start();
}
@AfterAll
public static void close() {
if (connect != null) {
Utils.closeQuietly(connect::stop, "Embedded Connect cluster");
}
}
@Test
public void testSinkConnectorHasNeitherTopicsListNorTopicsRegex() throws InterruptedException {
Map<String, String> config = defaultSinkConnectorProps();
config.remove(TOPICS_CONFIG);
config.remove(TOPICS_REGEX_CONFIG);
connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation(
config.get(CONNECTOR_CLASS_CONFIG),
config,
2, // One error each for topics list and topics regex
"Sink connector config should fail preflight validation when neither topics list nor topics regex are provided",
0
);
}
@Test
public void testSinkConnectorHasBothTopicsListAndTopicsRegex() throws InterruptedException {
Map<String, String> config = defaultSinkConnectorProps();
config.put(TOPICS_CONFIG, "t1");
config.put(TOPICS_REGEX_CONFIG, "r.*");
connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation(
config.get(CONNECTOR_CLASS_CONFIG),
config,
2, // One error each for topics list and topics regex
"Sink connector config should fail preflight validation when both topics list and topics regex are provided",
0
);
}
@Test
public void testSinkConnectorDeadLetterQueueTopicInTopicsList() throws InterruptedException {
Map<String, String> config = defaultSinkConnectorProps();
config.put(TOPICS_CONFIG, "t1");
config.put(DLQ_TOPIC_NAME_CONFIG, "t1");
connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation(
config.get(CONNECTOR_CLASS_CONFIG),
config,
1,
"Sink connector config should fail preflight validation when DLQ topic is included in topics list",
0
);
}
@Test
public void testSinkConnectorDeadLetterQueueTopicMatchesTopicsRegex() throws InterruptedException {
Map<String, String> config = defaultSinkConnectorProps();
config.put(TOPICS_REGEX_CONFIG, "r.*");
config.put(DLQ_TOPIC_NAME_CONFIG, "ruh.roh");
config.remove(TOPICS_CONFIG);
connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation(
config.get(CONNECTOR_CLASS_CONFIG),
config,
1,
"Sink connector config should fail preflight validation when DLQ topic matches topics regex",
0
);
}
@Test
public void testSinkConnectorDefaultGroupIdConflictsWithWorkerGroupId() throws InterruptedException {
Map<String, String> config = defaultSinkConnectorProps();
// Combined with the logic in SinkUtils::consumerGroupId, this should conflict with the worker group ID
config.put(NAME_CONFIG, "worker-group-id");
connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation(
config.get(CONNECTOR_CLASS_CONFIG),
config,
1,
"Sink connector config should fail preflight validation when default consumer group ID conflicts with Connect worker group ID",
0
);
}
@Test
public void testSinkConnectorOverriddenGroupIdConflictsWithWorkerGroupId() throws InterruptedException {
Map<String, String> config = defaultSinkConnectorProps();
config.put(CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX + GROUP_ID_CONFIG, WORKER_GROUP_ID);
connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation(
config.get(CONNECTOR_CLASS_CONFIG),
config,
1,
"Sink connector config should fail preflight validation when overridden consumer group ID conflicts with Connect worker group ID",
0
);
}
@Test
public void testSourceConnectorHasDuplicateTopicCreationGroups() throws InterruptedException {
Map<String, String> config = defaultSourceConnectorProps();
config.put(TOPIC_CREATION_GROUPS_CONFIG, "g1, g2, g1");
connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation(
config.get(CONNECTOR_CLASS_CONFIG),
config,
1,
"Source connector config should fail preflight validation when the same topic creation group is specified multiple times",
0
);
}
@Test
public void testConnectorHasDuplicateTransformations() throws InterruptedException {
Map<String, String> config = defaultSinkConnectorProps();
String transformName = "t";
config.put(TRANSFORMS_CONFIG, transformName + ", " + transformName);
config.put(TRANSFORMS_CONFIG + "." + transformName + ".type", Filter.class.getName());
connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation(
config.get(CONNECTOR_CLASS_CONFIG),
config,
1,
"Connector config should fail preflight validation when the same transformation is specified multiple times",
0
);
}
@Test
public void testConnectorHasMissingTransformClass() throws InterruptedException {
Map<String, String> config = defaultSinkConnectorProps();
String transformName = "t";
config.put(TRANSFORMS_CONFIG, transformName);
config.put(TRANSFORMS_CONFIG + "." + transformName + ".type", "WheresTheFruit");
connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation(
config.get(CONNECTOR_CLASS_CONFIG),
config,
1,
"Connector config should fail preflight validation when a transformation with a
|
ConnectorValidationIntegrationTest
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/NanoTimer.java
|
{
"start": 880,
"end": 4478
}
|
class ____ {
private long last = -1;
private boolean started = false;
private long cumulate = 0;
/**
* Constructor
*
* @param start
* Start the timer upon construction.
*/
public NanoTimer(boolean start) {
if (start) this.start();
}
/**
* Start the timer.
*
* Note: No effect if timer is already started.
*/
public void start() {
if (!this.started) {
this.last = System.nanoTime();
this.started = true;
}
}
/**
* Stop the timer.
*
* Note: No effect if timer is already stopped.
*/
public void stop() {
if (this.started) {
this.started = false;
this.cumulate += System.nanoTime() - this.last;
}
}
/**
* Read the timer.
*
* @return the elapsed time in nano-seconds. Note: If the timer is never
* started before, -1 is returned.
*/
public long read() {
if (!readable()) return -1;
return this.cumulate;
}
/**
* Reset the timer.
*/
public void reset() {
this.last = -1;
this.started = false;
this.cumulate = 0;
}
/**
* Checking whether the timer is started
*
* @return true if timer is started.
*/
public boolean isStarted() {
return this.started;
}
/**
* Format the elapsed time to a human understandable string.
*
* Note: If timer is never started, "ERR" will be returned.
*/
@Override
public String toString() {
if (!readable()) {
return "ERR";
}
return NanoTimer.nanoTimeToString(this.cumulate);
}
/**
* A utility method to format a time duration in nano seconds into a human
* understandable stirng.
*
* @param t
* Time duration in nano seconds.
* @return String representation.
*/
public static String nanoTimeToString(long t) {
if (t < 0) return "ERR";
if (t == 0) return "0";
if (t < 1000) {
return t + "ns";
}
double us = (double) t / 1000;
if (us < 1000) {
return String.format("%.2fus", us);
}
double ms = us / 1000;
if (ms < 1000) {
return String.format("%.2fms", ms);
}
double ss = ms / 1000;
if (ss < 1000) {
return String.format("%.2fs", ss);
}
long mm = (long) ss / 60;
ss -= mm * 60;
long hh = mm / 60;
mm -= hh * 60;
long dd = hh / 24;
hh -= dd * 24;
if (dd > 0) {
return String.format("%dd%dh", dd, hh);
}
if (hh > 0) {
return String.format("%dh%dm", hh, mm);
}
if (mm > 0) {
return String.format("%dm%.1fs", mm, ss);
}
return String.format("%.2fs", ss);
/**
* StringBuilder sb = new StringBuilder(); String sep = "";
*
* if (dd > 0) { String unit = (dd > 1) ? "days" : "day";
* sb.append(String.format("%s%d%s", sep, dd, unit)); sep = " "; }
*
* if (hh > 0) { String unit = (hh > 1) ? "hrs" : "hr";
* sb.append(String.format("%s%d%s", sep, hh, unit)); sep = " "; }
*
* if (mm > 0) { String unit = (mm > 1) ? "mins" : "min";
* sb.append(String.format("%s%d%s", sep, mm, unit)); sep = " "; }
*
* if (ss > 0) { String unit = (ss > 1) ? "secs" : "sec";
* sb.append(String.format("%s%.3f%s", sep, ss, unit)); sep = " "; }
*
* return sb.toString();
*/
}
private boolean readable() {
return this.last != -1;
}
/**
* Simple tester.
*
* @param args
*/
public static void main(String[] args) {
long i = 7;
for (int x = 0; x < 20; ++x, i *= 7) {
System.out.println(NanoTimer.nanoTimeToString(i));
}
}
}
|
NanoTimer
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/time/PreferJavaTimeOverloadTest.java
|
{
"start": 12430,
"end": 12864
}
|
class ____ {
private void bar(org.joda.time.ReadableDuration d) {}
public void foo(org.joda.time.Duration jodaDuration) {
bar(jodaDuration);
}
}
""")
.doTest();
}
@Test
public void callingPrimitiveOverloadFromFieldInitializer() {
helper
.addSourceLines(
"TestClass.java",
"""
public
|
TestClass
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/collection/spi/PersistentList.java
|
{
"start": 11824,
"end": 12042
}
|
class ____ extends AbstractValueDelayedOperation {
public SimpleAdd(E addedValue) {
super( addedValue, null );
}
@Override
public void operate() {
list.add( getAddedInstance() );
}
}
abstract
|
SimpleAdd
|
java
|
spring-projects__spring-framework
|
spring-expression/src/test/java/org/springframework/expression/spel/SpelCompilationCoverageTests.java
|
{
"start": 249114,
"end": 249258
}
|
interface ____<T> {
MessageHeaders getHeaders();
@SuppressWarnings("rawtypes")
List getList();
int[] getIa();
}
public static
|
Message
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/applicationfieldaccess/PublicFieldAccessAssociationsTest.java
|
{
"start": 1204,
"end": 5005
}
|
class ____ {
private static final String CONTAINED_VALUE = "someValue";
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClass(ContainingEntity.class)
.addClass(FieldAccessEnhancedDelegate.class))
.withConfigurationResource("application-fetch-max-depth-zero.properties");
@Inject
EntityManager em;
@Inject
UserTransaction transaction;
@Test
public void testFieldAccess()
throws SystemException, NotSupportedException, HeuristicRollbackException, HeuristicMixedException,
RollbackException {
// Ideally we'd write a @ParameterizedTest and pass the delegates as parameters,
// but we cannot do that due to JUnit using a different classloader than the test.
for (FieldAccessEnhancedDelegate delegate : FieldAccessEnhancedDelegate.values()) {
doTestFieldAccess(delegate);
}
}
private void doTestFieldAccess(FieldAccessEnhancedDelegate delegate)
throws SystemException, NotSupportedException, HeuristicRollbackException, HeuristicMixedException,
RollbackException {
ContainingEntity entity = new ContainingEntity();
ContainedEntity containedEntity = new ContainedEntity();
containedEntity.value = CONTAINED_VALUE;
transaction.begin();
em.persist(entity);
em.persist(containedEntity);
transaction.commit();
transaction.begin();
entity = em.getReference(ContainingEntity.class, entity.id);
containedEntity = em.getReference(ContainedEntity.class, containedEntity.id);
// Initially the assertion doesn't pass: the value was not set yet
AssertionError expected = null;
try {
delegate.assertValueAndLaziness(entity, containedEntity);
} catch (AssertionError e) {
expected = e;
}
if (expected == null) {
throw new IllegalStateException("This test is buggy: assertions should not pass at this point.");
}
transaction.rollback();
transaction.begin();
entity = em.getReference(ContainingEntity.class, entity.id);
containedEntity = em.getReference(ContainedEntity.class, containedEntity.id);
// Since field access is replaced with accessor calls,
// we expect this change to be detected by dirty tracking and persisted.
delegate.setValue(entity, containedEntity);
transaction.commit();
// Test getReference()
transaction.begin();
entity = em.getReference(ContainingEntity.class, entity.id);
containedEntity = em.getReference(ContainedEntity.class, containedEntity.id);
// We're working on an uninitialized proxy.
assertThat(entity).returns(false, Hibernate::isInitialized);
// The above should have persisted a value that passes the assertion.
delegate.assertValueAndLaziness(entity, containedEntity);
// Accessing the value should trigger initialization of the proxy.
assertThat(entity).returns(true, Hibernate::isInitialized);
transaction.commit();
// Test find()
transaction.begin();
entity = em.find(ContainingEntity.class, entity.id);
containedEntity = em.find(ContainedEntity.class, containedEntity.id);
// We're working on an actual entity instance (not a proxy).
assertThat(entity).returns(true, Hibernate::isInitialized);
// The above should have persisted a value that passes the assertion.
delegate.assertValueAndLaziness(entity, containedEntity);
transaction.commit();
}
@Entity
public static
|
PublicFieldAccessAssociationsTest
|
java
|
redisson__redisson
|
redisson-spring-data/redisson-spring-data-30/src/main/java/org/redisson/spring/data/connection/RedissonStreamCommands.java
|
{
"start": 12254,
"end": 18273
}
|
class ____ implements MultiDecoder<PendingMessages> {
private final String groupName;
private final Range<?> range;
public PendingMessagesReplayDecoder(String groupName, Range<?> range) {
this.groupName = groupName;
this.range = range;
}
@Override
public PendingMessages decode(List<Object> parts, State state) {
List<PendingMessage> pendingMessages = (List<PendingMessage>) (Object) parts;
return new PendingMessages(groupName, range, pendingMessages);
}
}
@Override
public PendingMessages xPending(byte[] key, String groupName, XPendingOptions options) {
Assert.notNull(key, "Key must not be null!");
Assert.notNull(groupName, "Group name must not be null!");
List<Object> params = new ArrayList<>();
params.add(key);
params.add(groupName);
params.add(((Range.Bound<String>)options.getRange().getLowerBound()).getValue().orElse("-"));
params.add(((Range.Bound<String>)options.getRange().getUpperBound()).getValue().orElse("+"));
if (options.getCount() != null) {
params.add(options.getCount());
} else {
params.add(10);
}
if (options.getConsumerName() != null) {
params.add(options.getConsumerName());
}
return connection.write(key, StringCodec.INSTANCE, new RedisCommand<>("XPENDING",
new ListMultiDecoder2<PendingMessages>(
new PendingMessagesReplayDecoder(groupName, options.getRange()),
new PendingMessageReplayDecoder(groupName))),
params.toArray());
}
@Override
public Long xAck(byte[] key, String group, RecordId... recordIds) {
Assert.notNull(key, "Key must not be null!");
Assert.notNull(group, "Group must not be null!");
Assert.notNull(recordIds, "recordIds must not be null!");
List<Object> params = new ArrayList<>();
params.add(key);
params.add(group);
params.addAll(toStringList(recordIds));
return connection.write(key, StringCodec.INSTANCE, RedisCommands.XACK, params.toArray());
}
private static final RedisStrictCommand<RecordId> XADD = new RedisStrictCommand<RecordId>("XADD", obj -> RecordId.of(obj.toString()));
@Override
public RecordId xAdd(MapRecord<byte[], byte[], byte[]> record, XAddOptions options) {
Assert.notNull(record, "record must not be null!");
List<Object> params = new LinkedList<>();
params.add(record.getStream());
if (options.getMaxlen() != null) {
params.add("MAXLEN");
params.add(options.getMaxlen());
}
if (!record.getId().shouldBeAutoGenerated()) {
params.add(record.getId().getValue());
} else {
params.add("*");
}
record.getValue().forEach((key, value) -> {
params.add(key);
params.add(value);
});
return connection.write(record.getStream(), StringCodec.INSTANCE, XADD, params.toArray());
}
@Override
public Long xDel(byte[] key, RecordId... recordIds) {
Assert.notNull(key, "Key must not be null!");
Assert.notNull(recordIds, "recordIds must not be null!");
List<Object> params = new ArrayList<>();
params.add(key);
params.addAll(toStringList(recordIds));
return connection.write(key, StringCodec.INSTANCE, RedisCommands.XDEL, params.toArray());
}
private static final RedisStrictCommand<String> XGROUP_STRING = new RedisStrictCommand<>("XGROUP");
@Override
public String xGroupCreate(byte[] key, String groupName, ReadOffset readOffset) {
return xGroupCreate(key, groupName, readOffset, false);
}
private static final RedisStrictCommand<Boolean> XGROUP_BOOLEAN = new RedisStrictCommand<Boolean>("XGROUP", obj -> ((Long)obj) > 0);
@Override
public Boolean xGroupDelConsumer(byte[] key, Consumer consumer) {
Assert.notNull(key, "Key must not be null!");
Assert.notNull(consumer, "Consumer must not be null!");
Assert.notNull(consumer.getName(), "Consumer name must not be null!");
Assert.notNull(consumer.getGroup(), "Consumer group must not be null!");
return connection.write(key, StringCodec.INSTANCE, XGROUP_BOOLEAN, "DELCONSUMER", key, consumer.getGroup(), consumer.getName());
}
@Override
public Boolean xGroupDestroy(byte[] key, String groupName) {
Assert.notNull(key, "Key must not be null!");
Assert.notNull(groupName, "GroupName must not be null!");
return connection.write(key, StringCodec.INSTANCE, XGROUP_BOOLEAN, "DESTROY", key, groupName);
}
@Override
public Long xLen(byte[] key) {
Assert.notNull(key, "Key must not be null!");
return connection.write(key, StringCodec.INSTANCE, RedisCommands.XLEN, key);
}
private List<ByteRecord> range(RedisCommand<?> rangeCommand, byte[] key, Range<String> range, Limit limit) {
Assert.notNull(key, "Key must not be null!");
Assert.notNull(range, "Range must not be null!");
Assert.notNull(limit, "Limit must not be null!");
List<Object> params = new LinkedList<>();
params.add(key);
if (rangeCommand.getName().equals(RedisCommands.XRANGE.getName())) {
params.add(range.getLowerBound().getValue().orElse("-"));
params.add(range.getUpperBound().getValue().orElse("+"));
} else {
params.add(range.getUpperBound().getValue().orElse("+"));
params.add(range.getLowerBound().getValue().orElse("-"));
}
if (limit.getCount() > 0) {
params.add("COUNT");
params.add(limit.getCount());
}
return connection.write(key, ByteArrayCodec.INSTANCE, rangeCommand, params.toArray());
}
private static
|
PendingMessagesReplayDecoder
|
java
|
quarkusio__quarkus
|
extensions/mongodb-client/runtime/src/main/java/io/quarkus/mongodb/MongoClientName.java
|
{
"start": 1159,
"end": 1558
}
|
class ____ extends AnnotationLiteral<MongoClientName> implements MongoClientName {
public static Literal of(String value) {
return new Literal(value);
}
private final String value;
public Literal(String value) {
this.value = value;
}
@Override
public String value() {
return value;
}
}
}
|
Literal
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-s3/src/test/java/org/apache/camel/component/aws2/s3/integration/S3ConsumerIgnoreBodyIT.java
|
{
"start": 1326,
"end": 2806
}
|
class ____ extends Aws2S3Base {
@EndpointInject
private ProducerTemplate template;
@EndpointInject("mock:result")
private MockEndpoint result;
@Test
public void sendIn() throws Exception {
result.expectedMessageCount(1);
template.send("direct:putObject", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(AWS2S3Constants.KEY, "test.txt");
exchange.getIn().setBody("Test");
}
});
Awaitility.await().atMost(10, TimeUnit.SECONDS)
.untilAsserted(() -> MockEndpoint.assertIsSatisfied(context));
Assertions.assertNull(result.getExchanges().get(0).getMessage().getBody());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
String awsEndpoint = "aws2-s3://" + name.get() + "?autoCreateBucket=true";
from("direct:putObject").startupOrder(1).to(awsEndpoint);
from("aws2-s3://" + name.get()
+ "?moveAfterRead=true&destinationBucket=camel-kafka-connector&autoCreateBucket=true&destinationBucketPrefix=RAW(movedPrefix)&destinationBucketSuffix=RAW(movedSuffix)&ignoreBody=true")
.startupOrder(2).log("${body}").to("mock:result");
}
};
}
}
|
S3ConsumerIgnoreBodyIT
|
java
|
spring-projects__spring-boot
|
module/spring-boot-graphql/src/test/java/org/springframework/boot/graphql/autoconfigure/GraphQlTestDataFetchers.java
|
{
"start": 1020,
"end": 2155
}
|
class ____ {
private static final List<Book> books = Arrays.asList(
new Book("book-1", "GraphQL for beginners", 100, "John GraphQL"),
new Book("book-2", "Harry Potter and the Philosopher's Stone", 223, "Joanne Rowling"),
new Book("book-3", "Moby Dick", 635, "Moby Dick"), new Book("book-3", "Moby Dick", 635, "Moby Dick"));
private GraphQlTestDataFetchers() {
}
public static DataFetcher<Book> getBookByIdDataFetcher() {
return (environment) -> {
String id = environment.getArgument("id");
assertThat(id).isNotNull();
return getBookById(id);
};
}
public static DataFetcher<Flux<Book>> getBooksOnSaleDataFetcher() {
return (environment) -> {
Integer minPages = environment.getArgument("minPages");
assertThat(minPages).isNotNull();
return getBooksOnSale(minPages);
};
}
public static @Nullable Book getBookById(String id) {
return books.stream().filter((book) -> book.getId().equals(id)).findFirst().orElse(null);
}
public static Flux<Book> getBooksOnSale(int minPages) {
return Flux.fromIterable(books).filter((book) -> book.getPageCount() >= minPages);
}
}
|
GraphQlTestDataFetchers
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
|
{
"start": 55946,
"end": 63965
}
|
class ____ extends SubjectInheritingThread {
final private BlockingQueue<Connection> pendingConnections;
private final Selector readSelector;
Reader(String name) throws IOException {
super(name);
this.pendingConnections =
new LinkedBlockingQueue<Connection>(readerPendingConnectionQueue);
this.readSelector = Selector.open();
}
@Override
public void work() {
LOG.info("Starting " + Thread.currentThread().getName());
try {
doRunLoop();
} finally {
try {
readSelector.close();
} catch (IOException ioe) {
LOG.error("Error closing read selector in " + Thread.currentThread().getName(), ioe);
}
}
}
private synchronized void doRunLoop() {
while (running) {
SelectionKey key = null;
try {
// consume as many connections as currently queued to avoid
// unbridled acceptance of connections that starves the select
int size = pendingConnections.size();
for (int i=size; i>0; i--) {
Connection conn = pendingConnections.take();
conn.channel.register(readSelector, SelectionKey.OP_READ, conn);
}
readSelector.select();
Iterator<SelectionKey> iter = readSelector.selectedKeys().iterator();
while (iter.hasNext()) {
key = iter.next();
iter.remove();
try {
if (key.isReadable()) {
doRead(key);
}
} catch (CancelledKeyException cke) {
// something else closed the connection, ex. responder or
// the listener doing an idle scan. ignore it and let them
// clean up.
LOG.info(Thread.currentThread().getName() +
": connection aborted from " + key.attachment());
}
key = null;
}
} catch (InterruptedException e) {
if (running) { // unexpected -- log it
LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
}
} catch (IOException ex) {
LOG.error("Error in Reader", ex);
} catch (Throwable re) {
LOG.error("Bug in read selector!", re);
ExitUtil.terminate(1, "Bug in read selector!");
}
}
}
/**
* Updating the readSelector while it's being used is not thread-safe,
* so the connection must be queued. The reader will drain the queue
* and update its readSelector before performing the next select
*/
public void addConnection(Connection conn) throws InterruptedException {
pendingConnections.put(conn);
readSelector.wakeup();
}
void shutdown() {
assert !running;
readSelector.wakeup();
try {
super.interrupt();
super.join();
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
}
@Override
public void work() {
LOG.info(Thread.currentThread().getName() + ": starting");
SERVER.set(Server.this);
connectionManager.startIdleScan();
while (running) {
SelectionKey key = null;
try {
getSelector().select();
Iterator<SelectionKey> iter = getSelector().selectedKeys().iterator();
while (iter.hasNext()) {
key = iter.next();
iter.remove();
try {
if (key.isValid()) {
if (key.isAcceptable())
doAccept(key);
}
} catch (IOException e) {
}
key = null;
}
} catch (OutOfMemoryError e) {
// we can run out of memory if we have too many threads
// log the event and sleep for a minute and give
// some thread(s) a chance to finish
LOG.warn("Out of Memory in server select", e);
closeCurrentConnection(key, e);
connectionManager.closeIdle(true);
try { Thread.sleep(60000); } catch (Exception ie) {}
} catch (Exception e) {
closeCurrentConnection(key, e);
}
}
LOG.info("Stopping " + Thread.currentThread().getName());
synchronized (this) {
try {
acceptChannel.close();
selector.close();
} catch (IOException e) { }
selector= null;
acceptChannel= null;
// close all connections
connectionManager.stopIdleScan();
connectionManager.closeAll();
}
}
private void closeCurrentConnection(SelectionKey key, Throwable e) {
if (key != null) {
Connection c = (Connection)key.attachment();
if (c != null) {
closeConnection(c);
c = null;
}
}
}
InetSocketAddress getAddress() {
return (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
}
void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfMemoryError {
ServerSocketChannel server = (ServerSocketChannel) key.channel();
SocketChannel channel;
while ((channel = server.accept()) != null) {
channel.configureBlocking(false);
channel.socket().setTcpNoDelay(tcpNoDelay);
channel.socket().setKeepAlive(true);
Reader reader = getReader();
Connection c = connectionManager.register(channel,
this.listenPort, this.isOnAuxiliaryPort);
// If the connectionManager can't take it, close the connection.
if (c == null) {
if (channel.isOpen()) {
IOUtils.cleanupWithLogger(LOG, channel);
}
connectionManager.droppedConnections.getAndIncrement();
continue;
}
key.attach(c); // so closeCurrentConnection can get the object
reader.addConnection(c);
}
}
void doRead(SelectionKey key) throws InterruptedException {
int count;
Connection c = (Connection)key.attachment();
if (c == null) {
return;
}
c.setLastContact(Time.now());
try {
count = c.readAndProcess();
} catch (InterruptedException ieo) {
LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo);
throw ieo;
} catch (Exception e) {
// Any exceptions that reach here are fatal unexpected internal errors
// that could not be sent to the client.
LOG.info(Thread.currentThread().getName() +
": readAndProcess from client " + c +
" threw exception [" + e + "]", e);
count = -1; //so that the (count < 0) block is executed
}
// setupResponse will signal the connection should be closed when a
// fatal response is sent.
if (count < 0 || c.shouldClose()) {
closeConnection(c);
c = null;
}
else {
c.setLastContact(Time.now());
}
}
synchronized void doStop() {
if (selector != null) {
selector.wakeup();
Thread.yield();
}
if (acceptChannel != null) {
try {
acceptChannel.socket().close();
} catch (IOException e) {
LOG.info(Thread.currentThread().getName() + ":Exception in closing listener socket. " + e);
}
}
for (Reader r : readers) {
r.shutdown();
}
}
synchronized Selector getSelector() { return selector; }
// The method that will return the next reader to work with
// Simplistic implementation of round robin for now
Reader getReader() {
currentReader = (currentReader + 1) % readers.length;
return readers[currentReader];
}
}
// Sends responses of RPC back to clients.
private
|
Reader
|
java
|
spring-projects__spring-security
|
web/src/main/java/org/springframework/security/web/header/writers/ClearSiteDataHeaderWriter.java
|
{
"start": 3975,
"end": 4208
}
|
class ____ implements RequestMatcher {
@Override
public boolean matches(HttpServletRequest request) {
return request.isSecure();
}
@Override
public String toString() {
return "Is Secure";
}
}
}
|
SecureRequestMatcher
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ArchivedExecutionGraph.java
|
{
"start": 3318,
"end": 20693
}
|
enum ____, i.e. the timestamp when the graph went into state "RUNNING" is at {@code
* stateTimestamps[RUNNING.ordinal()]}.
*/
private final long[] stateTimestamps;
// ------ Configuration of the Execution -------
// ------ Execution status and progress. These values are volatile, and accessed under the lock
// -------
/** Current status of the job execution. */
private final JobStatus state;
/** The job type of the job execution. */
@Nullable private final JobType jobType;
/**
* The exception that caused the job to fail. This is set to the first root exception that was
* not recoverable and triggered job failure
*/
@Nullable private final ErrorInfo failureCause;
// ------ Fields that are only relevant for archived execution graphs ------------
private final JobPlanInfo.Plan plan;
private final StringifiedAccumulatorResult[] archivedUserAccumulators;
private final ArchivedExecutionConfig archivedExecutionConfig;
private final boolean isStoppable;
private final Map<String, SerializedValue<OptionalFailure<Object>>> serializedUserAccumulators;
@Nullable private final CheckpointCoordinatorConfiguration jobCheckpointingConfiguration;
@Nullable private final CheckpointStatsSnapshot checkpointStatsSnapshot;
@Nullable private final String stateBackendName;
@Nullable private final String checkpointStorageName;
@Nullable private final TernaryBoolean stateChangelogEnabled;
@Nullable private final String changelogStorageName;
@Nullable private final String streamGraphJson;
private final int pendingOperatorCount;
public ArchivedExecutionGraph(
JobID jobID,
String jobName,
Map<JobVertexID, ArchivedExecutionJobVertex> tasks,
List<ArchivedExecutionJobVertex> verticesInCreationOrder,
long[] stateTimestamps,
JobStatus state,
@Nullable JobType jobType,
@Nullable ErrorInfo failureCause,
JobPlanInfo.Plan plan,
StringifiedAccumulatorResult[] archivedUserAccumulators,
Map<String, SerializedValue<OptionalFailure<Object>>> serializedUserAccumulators,
ArchivedExecutionConfig executionConfig,
boolean isStoppable,
@Nullable CheckpointCoordinatorConfiguration jobCheckpointingConfiguration,
@Nullable CheckpointStatsSnapshot checkpointStatsSnapshot,
@Nullable String stateBackendName,
@Nullable String checkpointStorageName,
@Nullable TernaryBoolean stateChangelogEnabled,
@Nullable String changelogStorageName,
@Nullable String streamGraphJson,
int pendingOperatorCount) {
this.jobID = Preconditions.checkNotNull(jobID);
this.jobName = Preconditions.checkNotNull(jobName);
this.tasks = Preconditions.checkNotNull(tasks);
this.verticesInCreationOrder = Preconditions.checkNotNull(verticesInCreationOrder);
this.stateTimestamps = Preconditions.checkNotNull(stateTimestamps);
this.state = Preconditions.checkNotNull(state);
this.jobType = jobType;
this.failureCause = failureCause;
this.plan = Preconditions.checkNotNull(plan);
this.archivedUserAccumulators = Preconditions.checkNotNull(archivedUserAccumulators);
this.serializedUserAccumulators = Preconditions.checkNotNull(serializedUserAccumulators);
this.archivedExecutionConfig = Preconditions.checkNotNull(executionConfig);
this.isStoppable = isStoppable;
this.jobCheckpointingConfiguration = jobCheckpointingConfiguration;
this.checkpointStatsSnapshot = checkpointStatsSnapshot;
this.stateBackendName = stateBackendName;
this.checkpointStorageName = checkpointStorageName;
this.stateChangelogEnabled = stateChangelogEnabled;
this.changelogStorageName = changelogStorageName;
this.streamGraphJson = streamGraphJson;
this.pendingOperatorCount = pendingOperatorCount;
}
// --------------------------------------------------------------------------------------------
@Override
public JobPlanInfo.Plan getPlan() {
return plan;
}
@Override
public String getStreamGraphJson() {
return streamGraphJson;
}
@Override
public JobID getJobID() {
return jobID;
}
@Override
public String getJobName() {
return jobName;
}
@Override
public JobStatus getState() {
return state;
}
@Override
public JobType getJobType() {
return jobType;
}
@Nullable
@Override
public ErrorInfo getFailureInfo() {
return failureCause;
}
@Override
public ArchivedExecutionJobVertex getJobVertex(JobVertexID id) {
return this.tasks.get(id);
}
@Override
public Map<JobVertexID, AccessExecutionJobVertex> getAllVertices() {
return Collections.<JobVertexID, AccessExecutionJobVertex>unmodifiableMap(this.tasks);
}
@Override
public Iterable<ArchivedExecutionJobVertex> getVerticesTopologically() {
// we return a specific iterator that does not fail with concurrent modifications
// the list is append only, so it is safe for that
final int numElements = this.verticesInCreationOrder.size();
return new Iterable<ArchivedExecutionJobVertex>() {
@Override
public Iterator<ArchivedExecutionJobVertex> iterator() {
return new Iterator<ArchivedExecutionJobVertex>() {
private int pos = 0;
@Override
public boolean hasNext() {
return pos < numElements;
}
@Override
public ArchivedExecutionJobVertex next() {
if (hasNext()) {
return verticesInCreationOrder.get(pos++);
} else {
throw new NoSuchElementException();
}
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
}
@Override
public Iterable<ArchivedExecutionVertex> getAllExecutionVertices() {
return new Iterable<ArchivedExecutionVertex>() {
@Override
public Iterator<ArchivedExecutionVertex> iterator() {
return new AllVerticesIterator<>(getVerticesTopologically().iterator());
}
};
}
@Override
public long getStatusTimestamp(JobStatus status) {
return this.stateTimestamps[status.ordinal()];
}
@Override
public CheckpointCoordinatorConfiguration getCheckpointCoordinatorConfiguration() {
return jobCheckpointingConfiguration;
}
@Override
public CheckpointStatsSnapshot getCheckpointStatsSnapshot() {
return checkpointStatsSnapshot;
}
@Override
public ArchivedExecutionConfig getArchivedExecutionConfig() {
return archivedExecutionConfig;
}
@Override
public boolean isStoppable() {
return isStoppable;
}
@Override
public StringifiedAccumulatorResult[] getAccumulatorResultsStringified() {
return archivedUserAccumulators;
}
@Override
public Map<String, SerializedValue<OptionalFailure<Object>>> getAccumulatorsSerialized() {
return serializedUserAccumulators;
}
@Override
public Optional<String> getStateBackendName() {
return Optional.ofNullable(stateBackendName);
}
@Override
public Optional<String> getCheckpointStorageName() {
return Optional.ofNullable(checkpointStorageName);
}
@Override
public TernaryBoolean isChangelogStateBackendEnabled() {
return stateChangelogEnabled;
}
@Override
public Optional<String> getChangelogStorageName() {
return Optional.ofNullable(changelogStorageName);
}
@Override
public int getPendingOperatorCount() {
return pendingOperatorCount;
}
/**
* Create a {@link ArchivedExecutionGraph} from the given {@link ExecutionGraph}.
*
* @param executionGraph to create the ArchivedExecutionGraph from
* @return ArchivedExecutionGraph created from the given ExecutionGraph
*/
public static ArchivedExecutionGraph createFrom(ExecutionGraph executionGraph) {
return createFrom(executionGraph, null);
}
/**
* Create a {@link ArchivedExecutionGraph} from the given {@link ExecutionGraph}.
*
* @param executionGraph to create the ArchivedExecutionGraph from
* @param statusOverride optionally overrides the JobStatus of the ExecutionGraph with a
* non-globally-terminal state and clears timestamps of globally-terminal states
* @return ArchivedExecutionGraph created from the given ExecutionGraph
*/
public static ArchivedExecutionGraph createFrom(
ExecutionGraph executionGraph, @Nullable JobStatus statusOverride) {
Preconditions.checkArgument(
statusOverride == null || !statusOverride.isGloballyTerminalState(),
"Status override is only allowed for non-globally-terminal states.");
Map<JobVertexID, ArchivedExecutionJobVertex> archivedTasks = new HashMap<>();
List<ArchivedExecutionJobVertex> archivedVerticesInCreationOrder = new ArrayList<>();
for (ExecutionJobVertex task : executionGraph.getVerticesTopologically()) {
ArchivedExecutionJobVertex archivedTask = task.archive();
archivedVerticesInCreationOrder.add(archivedTask);
archivedTasks.put(task.getJobVertexId(), archivedTask);
}
final Map<String, SerializedValue<OptionalFailure<Object>>> serializedUserAccumulators =
executionGraph.getAccumulatorsSerialized();
final long[] timestamps = new long[JobStatus.values().length];
// if the state is overridden with a non-globally-terminal state then we need to erase
// traces of globally-terminal states for consistency
final boolean clearGloballyTerminalStateTimestamps = statusOverride != null;
for (JobStatus jobStatus : JobStatus.values()) {
final int ordinal = jobStatus.ordinal();
if (!(clearGloballyTerminalStateTimestamps && jobStatus.isGloballyTerminalState())) {
timestamps[ordinal] = executionGraph.getStatusTimestamp(jobStatus);
}
}
return new ArchivedExecutionGraph(
executionGraph.getJobID(),
executionGraph.getJobName(),
archivedTasks,
archivedVerticesInCreationOrder,
timestamps,
statusOverride == null ? executionGraph.getState() : statusOverride,
executionGraph.getJobType(),
executionGraph.getFailureInfo(),
executionGraph.getPlan(),
executionGraph.getAccumulatorResultsStringified(),
serializedUserAccumulators,
executionGraph.getArchivedExecutionConfig(),
executionGraph.isStoppable(),
executionGraph.getCheckpointCoordinatorConfiguration(),
executionGraph.getCheckpointStatsSnapshot(),
executionGraph.getStateBackendName().orElse(null),
executionGraph.getCheckpointStorageName().orElse(null),
executionGraph.isChangelogStateBackendEnabled(),
executionGraph.getChangelogStorageName().orElse(null),
executionGraph.getStreamGraphJson(),
executionGraph.getPendingOperatorCount());
}
/**
* Create a sparse ArchivedExecutionGraph for a job. Most fields will be empty, only job status
* and error-related fields are set.
*/
public static ArchivedExecutionGraph createSparseArchivedExecutionGraph(
JobID jobId,
String jobName,
JobStatus jobStatus,
@Nullable JobType jobType,
@Nullable Throwable throwable,
@Nullable JobCheckpointingSettings checkpointingSettings,
long initializationTimestamp) {
return createSparseArchivedExecutionGraph(
jobId,
jobName,
jobStatus,
jobType,
Collections.emptyMap(),
Collections.emptyList(),
throwable,
checkpointingSettings,
initializationTimestamp);
}
public static ArchivedExecutionGraph createSparseArchivedExecutionGraphWithJobVertices(
JobID jobId,
String jobName,
JobStatus jobStatus,
JobType jobType,
@Nullable Throwable throwable,
@Nullable JobCheckpointingSettings checkpointingSettings,
long initializationTimestamp,
Iterable<JobVertex> jobVertices,
VertexParallelismStore initialParallelismStore) {
final Map<JobVertexID, ArchivedExecutionJobVertex> archivedJobVertices = new HashMap<>();
final List<ArchivedExecutionJobVertex> archivedVerticesInCreationOrder = new ArrayList<>();
for (JobVertex jobVertex : jobVertices) {
final VertexParallelismInformation parallelismInfo =
initialParallelismStore.getParallelismInfo(jobVertex.getID());
ArchivedExecutionJobVertex archivedJobVertex =
new ArchivedExecutionJobVertex(
new ArchivedExecutionVertex[0],
jobVertex.getID(),
jobVertex.getName(),
parallelismInfo.getParallelism(),
parallelismInfo.getMaxParallelism(),
jobVertex.getSlotSharingGroup(),
ResourceProfile.fromResourceSpec(
jobVertex.getMinResources(), MemorySize.ZERO),
new StringifiedAccumulatorResult[0]);
archivedVerticesInCreationOrder.add(archivedJobVertex);
archivedJobVertices.put(archivedJobVertex.getJobVertexId(), archivedJobVertex);
}
return createSparseArchivedExecutionGraph(
jobId,
jobName,
jobStatus,
jobType,
archivedJobVertices,
archivedVerticesInCreationOrder,
throwable,
checkpointingSettings,
initializationTimestamp);
}
private static ArchivedExecutionGraph createSparseArchivedExecutionGraph(
JobID jobId,
String jobName,
JobStatus jobStatus,
JobType jobType,
Map<JobVertexID, ArchivedExecutionJobVertex> archivedTasks,
List<ArchivedExecutionJobVertex> archivedVerticesInCreationOrder,
@Nullable Throwable throwable,
@Nullable JobCheckpointingSettings checkpointingSettings,
long initializationTimestamp) {
final Map<String, SerializedValue<OptionalFailure<Object>>> serializedUserAccumulators =
Collections.emptyMap();
StringifiedAccumulatorResult[] archivedUserAccumulators =
new StringifiedAccumulatorResult[] {};
final long[] timestamps = new long[JobStatus.values().length];
timestamps[JobStatus.INITIALIZING.ordinal()] = initializationTimestamp;
JobPlanInfo.Plan plan = new JobPlanInfo.Plan("", "", "", new ArrayList<>());
ErrorInfo failureInfo = null;
if (throwable != null) {
Preconditions.checkState(
jobStatus == JobStatus.FAILED || jobStatus == JobStatus.SUSPENDED);
long failureTime = System.currentTimeMillis();
failureInfo = new ErrorInfo(throwable, failureTime);
timestamps[jobStatus.ordinal()] = failureTime;
}
return new ArchivedExecutionGraph(
jobId,
jobName,
archivedTasks,
archivedVerticesInCreationOrder,
timestamps,
jobStatus,
jobType,
failureInfo,
plan,
archivedUserAccumulators,
serializedUserAccumulators,
new ExecutionConfig().archive(),
false,
checkpointingSettings == null
? null
: checkpointingSettings.getCheckpointCoordinatorConfiguration(),
checkpointingSettings == null ? null : CheckpointStatsSnapshot.empty(),
checkpointingSettings == null ? null : "Unknown",
checkpointingSettings == null ? null : "Unknown",
checkpointingSettings == null
? TernaryBoolean.UNDEFINED
: checkpointingSettings.isChangelogStateBackendEnabled(),
checkpointingSettings == null ? null : "Unknown",
null,
0);
}
}
|
value
|
java
|
mapstruct__mapstruct
|
processor/src/main/java/org/mapstruct/ap/internal/util/Strings.java
|
{
"start": 468,
"end": 6106
}
|
class ____ {
private static final Set<String> KEYWORDS = asSet(
"abstract",
"continue",
"for",
"new",
"switch",
"assert",
"default",
"goto",
"package",
"synchronized",
"boolean",
"do",
"if",
"private",
"this",
"break",
"double",
"implements",
"protected",
"throw",
"byte",
"else",
"import",
"public",
"throws",
"case",
"enum",
"instanceof",
"return",
"transient",
"catch",
"extends",
"int",
"short",
"try",
"char",
"final",
"interface",
"static",
"void",
"class",
"finally",
"long",
"strictfp",
"volatile",
"const",
"float",
"native",
"super",
"while"
);
private static final char UNDERSCORE = '_';
private Strings() {
}
public static String capitalize(String string) {
return string == null ? null : string.substring( 0, 1 ).toUpperCase( Locale.ROOT ) + string.substring( 1 );
}
public static String decapitalize(String string) {
return string == null ? null : string.substring( 0, 1 ).toLowerCase( Locale.ROOT ) + string.substring( 1 );
}
public static String join(Iterable<?> iterable, String separator) {
return join( iterable, separator, null );
}
public static <T> String join(Iterable<T> iterable, String separator, Extractor<T, String> extractor) {
StringBuilder sb = new StringBuilder();
boolean isFirst = true;
for ( T object : iterable ) {
if ( !isFirst ) {
sb.append( separator );
}
else {
isFirst = false;
}
sb.append( extractor == null ? object : extractor.apply( object ) );
}
return sb.toString();
}
public static String joinAndCamelize(Iterable<?> iterable) {
StringBuilder sb = new StringBuilder();
boolean isFirst = true;
for ( Object object : iterable ) {
if ( !isFirst ) {
sb.append( capitalize( object.toString() ) );
}
else {
sb.append( object );
isFirst = false;
}
}
return sb.toString();
}
public static boolean isEmpty(String string) {
return string == null || string.isEmpty();
}
public static boolean isNotEmpty(String string) {
return !isEmpty( string );
}
public static String getSafeVariableName(String name, String... existingVariableNames) {
return getSafeVariableName( name, Arrays.asList( existingVariableNames ) );
}
/**
* Returns a variable name which doesn't conflict with the given variable names existing in the same scope and the
* Java keywords.
*
* @param name the name to get a safe version for
* @param existingVariableNames the names of other variables existing in the same scope
*
* @return a variable name based on the given original name, not conflicting with any of the given other names or
* any Java keyword; starting with a lower-case letter
*/
public static String getSafeVariableName(String name, Collection<String> existingVariableNames) {
name = decapitalize( sanitizeIdentifierName( name ) );
name = joinAndCamelize( extractParts( name ) );
Set<String> conflictingNames = new HashSet<>( KEYWORDS );
conflictingNames.addAll( existingVariableNames );
if ( !conflictingNames.contains( name ) ) {
return name;
}
int c = 1;
String separator = Character.isDigit( name.charAt( name.length() - 1 ) ) ? "_" : "";
while ( conflictingNames.contains( name + separator + c ) ) {
c++;
}
return name + separator + c;
}
/**
* @param identifier identifier to sanitize
* @return the identifier without any characters that are not allowed as part of a Java identifier.
*/
public static String sanitizeIdentifierName(String identifier) {
if ( identifier != null && identifier.length() > 0 ) {
int firstAlphabeticIndex = 0;
while ( firstAlphabeticIndex < identifier.length() &&
( identifier.charAt( firstAlphabeticIndex ) == UNDERSCORE ||
Character.isDigit( identifier.charAt( firstAlphabeticIndex ) ) ) ) {
firstAlphabeticIndex++;
}
if ( firstAlphabeticIndex < identifier.length()) {
// If it is not consisted of only underscores
String firstAlphaString = identifier.substring( firstAlphabeticIndex ).replace( "[]", "Array" );
StringBuilder sb = new StringBuilder( firstAlphaString.length() );
for ( int i = 0; i < firstAlphaString.length(); i++ ) {
int codePoint = firstAlphaString.codePointAt( i );
if ( Character.isJavaIdentifierPart( codePoint ) || codePoint == '.') {
sb.appendCodePoint( codePoint );
}
else {
sb.append( '_' );
}
}
return sb.toString();
}
return identifier.replace( "[]", "Array" );
}
return identifier;
}
/**
* Returns a stub property name from full
|
Strings
|
java
|
apache__camel
|
components/camel-huawei/camel-huaweicloud-obs/src/test/java/org/apache/camel/component/huaweicloud/obs/DownloadSingleFunctionalTest.java
|
{
"start": 1555,
"end": 4015
}
|
class ____ extends CamelTestSupport {
private static final String ACCESS_KEY = "replace_this_with_access_key";
private static final String SECRET_KEY = "replace_this_with_secret_key";
private static final String REGION = "replace_this_with_region";
private static final String BUCKET_NAME = "replace_this_with_bucket_name";
private static final String FILENAME = "replace_this_with_filename";
@BindToRegistry("serviceKeys")
ServiceKeys serviceKeys = new ServiceKeys(ACCESS_KEY, SECRET_KEY);
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("hwcloud-obs:?" +
"serviceKeys=#serviceKeys" +
"®ion=" + REGION +
"&bucketName=" + BUCKET_NAME +
"&fileName=" + FILENAME +
"&ignoreSslVerification=true" +
"&deleteAfterRead=false" +
"&moveAfterRead=false")
.log("Download objects successful")
.to("log:LOG?showAll=true")
.to("mock:download_objects_result");
}
};
}
/**
* The following test cases should be manually enabled to perform test against the actual HuaweiCloud OBS server
* with real user credentials. To perform this test, manually comment out the @Ignore annotation and enter relevant
* service parameters in the placeholders above (static variables of this test class)
*
* @throws Exception
*/
@Disabled("Manually enable this once you configure the parameters in the placeholders above")
@Test
public void testListBuckets() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:download_objects_result");
mock.expectedMinimumMessageCount(1);
mock.assertIsSatisfied();
Exchange responseExchange = mock.getExchanges().get(0);
assertEquals(1, mock.getExchanges().size());
assertEquals(FILENAME, responseExchange.getIn().getHeader(OBSHeaders.OBJECT_KEY, String.class));
if (responseExchange.getIn().getHeader(Exchange.CONTENT_LENGTH, Integer.class) > 0) {
assertNotNull(responseExchange.getIn().getBody(String.class));
assertTrue(responseExchange.getIn().getBody(String.class).length() > 0);
}
}
}
|
DownloadSingleFunctionalTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/StatementSwitchToExpressionSwitchTest.java
|
{
"start": 15775,
"end": 16419
}
|
class ____ {
public void foo(Suit suit) {
switch (suit) {
case HEART:
System.out.println("heart");
break;
case DIAMOND:
System.out.println("diamond");
return;
default /* comment: */:
System.out.println("club");
break;
case SPADE:
System.out.println("spade");
}
}
}
""")
.addOutputLines(
"Test.java",
"""
|
Test
|
java
|
apache__camel
|
components/camel-csv/src/test/java/org/apache/camel/dataformat/csv/CsvMarshalHeaderWithCustomMarshallFactoryTest.java
|
{
"start": 1896,
"end": 4155
}
|
class ____ extends CamelTestSupport {
@TempDir
public File folder;
@Produce("direct:start")
private ProducerTemplate producerTemplate;
private File outputFile;
@Override
protected void doPreSetup() {
outputFile = new File(folder, "output.csv");
}
@Test
void testSendBody() throws IOException {
Map<String, String> body = new LinkedHashMap<>();
body.put("first_name", "John");
body.put("last_name", "Doe");
String fileName = outputFile.getName();
assertEquals("output.csv", fileName);
producerTemplate.sendBodyAndHeader(body, Exchange.FILE_NAME, fileName);
body = new LinkedHashMap<>();
body.put("first_name", "Max");
body.put("last_name", "Mustermann");
producerTemplate.sendBodyAndHeader(body, Exchange.FILE_NAME, fileName);
try (Stream<String> stream = Files.lines(Paths.get(outputFile.toURI()))
.filter(l -> !l.isBlank())) {
List<String> lines = stream.toList();
assertEquals(3, lines.size());
}
}
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
String uri
= String.format("file:%s?charset=utf-8&fileExist=Append", outputFile.getParentFile().getAbsolutePath());
from("direct:start").marshal(createCsvDataFormat()).to(uri);
}
};
}
private static CsvDataFormat createCsvDataFormat() {
CsvDataFormat dataFormat = new CsvDataFormat();
dataFormat.setDelimiter('\t');
dataFormat.setTrim(true);
dataFormat.setIgnoreSurroundingSpaces(true);
dataFormat.setHeader(Arrays.asList("first_name", "last_name").toArray(new String[0]));
dataFormat.setMarshallerFactory(new CsvMarshallerFactory() {
@Override
public CsvMarshaller create(CSVFormat format, CsvDataFormat dataFormat) {
return new SinglePrinterCsvMarshaller(format);
}
});
return dataFormat;
}
//
// Helper classes
//
private static final
|
CsvMarshalHeaderWithCustomMarshallFactoryTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/query/NativeQueryResultTypeAutoDiscoveryTest.java
|
{
"start": 23018,
"end": 23187
}
|
class ____ extends TestedEntity<Boolean> {
public Boolean getTestedProperty() {
return testedProperty;
}
}
@Entity(name = "bitEntity")
public static
|
BooleanEntity
|
java
|
quarkusio__quarkus
|
extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/builditemtemplate/AdditionalTemplatePathTest.java
|
{
"start": 588,
"end": 2507
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot(root -> root
.addAsResource(new StringAsset("Hi {name}!"), "templates/hi.txt")
.addAsResource(new StringAsset("And... {#include foo/hello /}"), "templates/include.txt"))
.addBuildChainCustomizer(buildCustomizer());
static Consumer<BuildChainBuilder> buildCustomizer() {
return new Consumer<BuildChainBuilder>() {
@Override
public void accept(BuildChainBuilder builder) {
builder.addBuildStep(new BuildStep() {
@Override
public void execute(BuildContext context) {
context.produce(TemplatePathBuildItem.builder()
.path("foo/hello.txt")
.extensionInfo("test-ext")
.content("Hello {name}!").build());
}
}).produces(TemplatePathBuildItem.class)
.build();
}
};
}
@Inject
Engine engine;
@Test
public void testTemplate() {
assertEquals("Hi M!", engine.getTemplate("hi").data("name", "M").render());
assertEquals("Hello M!", engine.getTemplate("foo/hello.txt").data("name", "M").render());
assertEquals("Hello M!", engine.getTemplate("foo/hello").data("name", "M").render());
assertEquals("And... Hello M!", engine.getTemplate("include").data("name", "M").render());
// Test that reload works for additional content-based paths
engine.clearTemplates();
assertEquals("Hello M!", engine.getTemplate("foo/hello").data("name", "M").render());
assertEquals("Hello M!", engine.getTemplate("foo/hello.txt").data("name", "M").render());
}
}
|
AdditionalTemplatePathTest
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/main/java/org/springframework/web/reactive/result/method/RequestMappingInfoHandlerMapping.java
|
{
"start": 8712,
"end": 12250
}
|
class ____ {
private final List<PartialMatch> partialMatches = new ArrayList<>();
PartialMatchHelper(Set<RequestMappingInfo> infos, ServerWebExchange exchange) {
for (RequestMappingInfo info : infos) {
if (info.getPatternsCondition().getMatchingCondition(exchange) != null) {
this.partialMatches.add(new PartialMatch(info, exchange));
}
}
}
/**
* Whether there are any partial matches.
*/
public boolean isEmpty() {
return this.partialMatches.isEmpty();
}
/**
* Any partial matches for "methods"?
*/
public boolean hasMethodsMismatch() {
return this.partialMatches.stream().noneMatch(PartialMatch::hasMethodsMatch);
}
/**
* Any partial matches for "methods" and "consumes"?
*/
public boolean hasConsumesMismatch() {
return this.partialMatches.stream().noneMatch(PartialMatch::hasConsumesMatch);
}
/**
* Any partial matches for "methods", "consumes", and "produces"?
*/
public boolean hasProducesMismatch() {
return this.partialMatches.stream().noneMatch(PartialMatch::hasProducesMatch);
}
/**
* Any partial matches for "methods", "consumes", "produces", and "params"?
*/
public boolean hasParamsMismatch() {
return this.partialMatches.stream().noneMatch(PartialMatch::hasParamsMatch);
}
/**
* Return declared HTTP methods.
*/
public Set<HttpMethod> getAllowedMethods() {
return this.partialMatches.stream()
.flatMap(m -> m.getInfo().getMethodsCondition().getMethods().stream())
.map(requestMethod -> HttpMethod.valueOf(requestMethod.name()))
.collect(Collectors.toSet());
}
/**
* Return declared "consumable" types but only among those that also
* match the "methods" condition.
*/
public Set<MediaType> getConsumableMediaTypes() {
return this.partialMatches.stream()
.filter(PartialMatch::hasMethodsMatch)
.flatMap(m -> m.getInfo().getConsumesCondition().getConsumableMediaTypes().stream())
.collect(Collectors.toCollection(LinkedHashSet::new));
}
/**
* Return declared "producible" types but only among those that also
* match the "methods" and "consumes" conditions.
*/
public Set<MediaType> getProducibleMediaTypes() {
return this.partialMatches.stream()
.filter(PartialMatch::hasConsumesMatch)
.flatMap(m -> m.getInfo().getProducesCondition().getProducibleMediaTypes().stream())
.collect(Collectors.toCollection(LinkedHashSet::new));
}
/**
* Return declared "params" conditions but only among those that also
* match the "methods", "consumes", and "params" conditions.
*/
public List<Set<NameValueExpression<String>>> getParamConditions() {
return this.partialMatches.stream()
.filter(PartialMatch::hasProducesMatch)
.map(match -> match.getInfo().getParamsCondition().getExpressions())
.toList();
}
/**
* Return declared "consumable" types but only among those that have
* PATCH specified, or that have no methods at all.
*/
public Set<MediaType> getConsumablePatchMediaTypes() {
Set<MediaType> result = new LinkedHashSet<>();
for (PartialMatch match : this.partialMatches) {
Set<RequestMethod> methods = match.getInfo().getMethodsCondition().getMethods();
if (methods.isEmpty() || methods.contains(RequestMethod.PATCH)) {
result.addAll(match.getInfo().getConsumesCondition().getConsumableMediaTypes());
}
}
return result;
}
/**
* Container for a RequestMappingInfo that matches the URL path at least.
*/
private static
|
PartialMatchHelper
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CommitResponsePBImpl.java
|
{
"start": 1073,
"end": 2163
}
|
class ____ extends CommitResponse {
YarnServiceProtos.CommitResponseProto proto =
YarnServiceProtos.CommitResponseProto.getDefaultInstance();
YarnServiceProtos.CommitResponseProto.Builder builder = null;
boolean viaProto = false;
public CommitResponsePBImpl() {
builder = YarnServiceProtos.CommitResponseProto.newBuilder();
}
public CommitResponsePBImpl(YarnServiceProtos.CommitResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public YarnServiceProtos.CommitResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null) {
return false;
}
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
|
CommitResponsePBImpl
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/TestInstanceLifecycleTests.java
|
{
"start": 42026,
"end": 43494
}
|
class ____ {
NestedInstancePerClassTestCase() {
incrementInstanceCount(NestedInstancePerClassTestCase.class);
}
@BeforeAll
void beforeAll(TestInfo testInfo) {
assertNotNull(testInfo);
beforeAllCount++;
}
@BeforeEach
void beforeEach() {
beforeEachCount++;
}
@SuppressWarnings("NullAway")
@Test
void test1(TestInfo testInfo) {
assertSame(this, instanceMap.get(postProcessTestInstanceKey(getClass())).getInnermostInstance());
testsInvoked.add(testInfo.getTestMethod().get().getName());
}
@SuppressWarnings("NullAway")
@Test
void test2(TestInfo testInfo) {
assertSame(this, instanceMap.get(postProcessTestInstanceKey(getClass())).getInnermostInstance());
testsInvoked.add(testInfo.getTestMethod().get().getName());
}
@SuppressWarnings("NullAway")
@SingletonTest
void singletonTest(TestInfo testInfo) {
assertSame(this, instanceMap.get(postProcessTestInstanceKey(getClass())).getInnermostInstance());
testsInvoked.add(testInfo.getTestMethod().get().getName());
}
@AfterEach
void afterEach() {
afterEachCount++;
}
@AfterAll
void afterAll(TestInfo testInfo) {
assertNotNull(testInfo);
afterAllCount++;
}
}
}
@SuppressWarnings("JUnitMalformedDeclaration")
@ExtendWith(InstanceTrackingExtension.class)
// The following is commented out b/c it's the default.
// @TestInstance(Lifecycle.PER_METHOD)
static
|
NestedInstancePerClassTestCase
|
java
|
apache__kafka
|
clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java
|
{
"start": 196523,
"end": 200886
}
|
class ____ extends NetworkClientDelegate {
private final Logger log = LoggerFactory.getLogger(TestableNetworkClientDelegate.class);
private final ConcurrentLinkedQueue<Node> pendingDisconnects = new ConcurrentLinkedQueue<>();
public TestableNetworkClientDelegate(Time time,
ConsumerConfig config,
LogContext logContext,
KafkaClient client,
Metadata metadata,
BackgroundEventHandler backgroundEventHandler,
boolean notifyMetadataErrorsViaErrorQueue) {
super(time, config, logContext, client, metadata, backgroundEventHandler, notifyMetadataErrorsViaErrorQueue, mock(AsyncConsumerMetrics.class));
}
@Override
public void poll(final long timeoutMs, final long currentTimeMs) {
handlePendingDisconnects();
super.poll(timeoutMs, currentTimeMs);
}
public void pollNoWakeup() {
poll(time.timer(0));
}
public int pendingRequestCount() {
return unsentRequests().size() + client.inFlightRequestCount();
}
public void poll(final Timer timer) {
long pollTimeout = Math.min(timer.remainingMs(), requestTimeoutMs);
if (client.inFlightRequestCount() == 0)
pollTimeout = Math.min(pollTimeout, retryBackoffMs);
poll(pollTimeout, timer.currentTimeMs());
}
private Set<Node> unsentRequestNodes() {
Set<Node> set = new HashSet<>();
for (UnsentRequest u : unsentRequests())
u.node().ifPresent(set::add);
return set;
}
private List<UnsentRequest> removeUnsentRequestByNode(Node node) {
List<UnsentRequest> list = new ArrayList<>();
Iterator<UnsentRequest> iter = unsentRequests().iterator();
while (iter.hasNext()) {
UnsentRequest u = iter.next();
if (node.equals(u.node().orElse(null))) {
iter.remove();
list.add(u);
}
}
return list;
}
@Override
protected void checkDisconnects(final long currentTimeMs, boolean onClose) {
// any disconnects affecting requests that have already been transmitted will be handled
// by NetworkClient, so we just need to check whether connections for any of the unsent
// requests have been disconnected; if they have, then we complete the corresponding future
// and set the disconnect flag in the ClientResponse
for (Node node : unsentRequestNodes()) {
if (client.connectionFailed(node)) {
// Remove entry before invoking request callback to avoid callbacks handling
// coordinator failures traversing the unsent list again.
for (UnsentRequest unsentRequest : removeUnsentRequestByNode(node)) {
// TODO: this should likely emulate what's done in ConsumerNetworkClient
log.error("checkDisconnects - please update! unsentRequest: {}", unsentRequest);
}
}
}
}
private void handlePendingDisconnects() {
while (true) {
Node node = pendingDisconnects.poll();
if (node == null)
break;
failUnsentRequests(node, DisconnectException.INSTANCE);
client.disconnect(node.idString());
}
}
public void disconnectAsync(Node node) {
pendingDisconnects.offer(node);
client.wakeup();
}
private void failUnsentRequests(Node node, RuntimeException e) {
// clear unsent requests to node and fail their corresponding futures
for (UnsentRequest unsentRequest : removeUnsentRequestByNode(node)) {
FutureCompletionHandler handler = unsentRequest.handler();
handler.onFailure(time.milliseconds(), e);
}
}
}
}
|
TestableNetworkClientDelegate
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/DataSetTestEndpointBuilderFactory.java
|
{
"start": 29797,
"end": 30139
}
|
class ____ extends AbstractEndpointBuilder implements DataSetTestEndpointBuilder, AdvancedDataSetTestEndpointBuilder {
public DataSetTestEndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new DataSetTestEndpointBuilderImpl(path);
}
}
|
DataSetTestEndpointBuilderImpl
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/factory/annotation/InjectAnnotationBeanPostProcessorTests.java
|
{
"start": 35816,
"end": 36924
}
|
class ____ {
protected ITestBean testBean3;
private ITestBean testBean4;
private List<NestedTestBean> nestedTestBeans;
public ConstructorsCollectionResourceInjectionBean() {
}
public ConstructorsCollectionResourceInjectionBean(ITestBean testBean3) {
this.testBean3 = testBean3;
}
@Inject
public ConstructorsCollectionResourceInjectionBean(ITestBean testBean4, List<NestedTestBean> nestedTestBeans) {
this.testBean4 = testBean4;
this.nestedTestBeans = nestedTestBeans;
}
public ConstructorsCollectionResourceInjectionBean(NestedTestBean nestedTestBean) {
throw new UnsupportedOperationException();
}
public ConstructorsCollectionResourceInjectionBean(ITestBean testBean3, ITestBean testBean4,
NestedTestBean nestedTestBean) {
throw new UnsupportedOperationException();
}
public ITestBean getTestBean3() {
return this.testBean3;
}
public ITestBean getTestBean4() {
return this.testBean4;
}
public List<NestedTestBean> getNestedTestBeans() {
return this.nestedTestBeans;
}
}
public static
|
ConstructorsCollectionResourceInjectionBean
|
java
|
grpc__grpc-java
|
android/src/test/java/io/grpc/android/AndroidChannelBuilderTest.java
|
{
"start": 2042,
"end": 10651
}
|
class ____ {
private final android.net.NetworkInfo WIFI_CONNECTED =
ShadowNetworkInfo.newInstance(
android.net.NetworkInfo.DetailedState.CONNECTED,
ConnectivityManager.TYPE_WIFI,
0,
true,
android.net.NetworkInfo.State.CONNECTED);
private final android.net.NetworkInfo WIFI_DISCONNECTED =
ShadowNetworkInfo.newInstance(
android.net.NetworkInfo.DetailedState.DISCONNECTED,
ConnectivityManager.TYPE_WIFI,
0,
true,
android.net.NetworkInfo.State.DISCONNECTED);
private final android.net.NetworkInfo MOBILE_CONNECTED =
ShadowNetworkInfo.newInstance(
android.net.NetworkInfo.DetailedState.CONNECTED,
ConnectivityManager.TYPE_MOBILE,
ConnectivityManager.TYPE_MOBILE_MMS,
true,
android.net.NetworkInfo.State.CONNECTED);
private final android.net.NetworkInfo MOBILE_DISCONNECTED =
ShadowNetworkInfo.newInstance(
android.net.NetworkInfo.DetailedState.DISCONNECTED,
ConnectivityManager.TYPE_MOBILE,
ConnectivityManager.TYPE_MOBILE_MMS,
true,
android.net.NetworkInfo.State.DISCONNECTED);
private ConnectivityManager connectivityManager;
@Before
public void setUp() {
connectivityManager =
(ConnectivityManager)
ApplicationProvider
.getApplicationContext()
.getSystemService(Context.CONNECTIVITY_SERVICE);
}
@Test
public void channelBuilderClassFoundReflectively() {
// This should not throw with OkHttpChannelBuilder on the classpath
AndroidChannelBuilder.forTarget("target");
}
@Test
public void usingBuilderConstructor() {
OkHttpChannelBuilder wrappedBuilder = OkHttpChannelBuilder.forTarget("target");
AndroidChannelBuilder androidBuilder = AndroidChannelBuilder.usingBuilder(wrappedBuilder);
assertThat(androidBuilder.delegate()).isSameInstanceAs(wrappedBuilder);
}
@Test
@Config(sdk = 23)
public void nullContextDoesNotThrow_api23() {
TestChannel delegateChannel = new TestChannel();
ManagedChannel androidChannel = new AndroidChannelBuilder.AndroidChannel(delegateChannel, null);
// Network change and shutdown should be no-op for the channel without an Android Context
shadowOf(connectivityManager).setActiveNetworkInfo(WIFI_CONNECTED);
ApplicationProvider
.getApplicationContext()
.sendBroadcast(new Intent(ConnectivityManager.CONNECTIVITY_ACTION));
androidChannel.shutdown();
assertThat(delegateChannel.enterIdleCount).isEqualTo(0);
}
@Test
@Config(sdk = 24)
public void nullContextDoesNotThrow_api24() {
shadowOf(connectivityManager).setActiveNetworkInfo(MOBILE_DISCONNECTED);
TestChannel delegateChannel = new TestChannel();
ManagedChannel androidChannel = new AndroidChannelBuilder.AndroidChannel(delegateChannel, null);
// Network change and shutdown should be no-op for the channel without an Android Context
shadowOf(connectivityManager).setActiveNetworkInfo(MOBILE_CONNECTED);
androidChannel.shutdown();
assertThat(delegateChannel.enterIdleCount).isEqualTo(0);
}
@Test
@Config(sdk = 23)
public void networkChanges_api23() {
TestChannel delegateChannel = new TestChannel();
ManagedChannel androidChannel =
new AndroidChannelBuilder.AndroidChannel(
delegateChannel, ApplicationProvider.getApplicationContext());
assertThat(delegateChannel.enterIdleCount).isEqualTo(0);
// On API levels < 24, the broadcast receiver will invoke enterIdle() on the first
// connectivity action broadcast regardless of previous connection status
shadowOf(connectivityManager).setActiveNetworkInfo(WIFI_CONNECTED);
ApplicationProvider
.getApplicationContext()
.sendBroadcast(new Intent(ConnectivityManager.CONNECTIVITY_ACTION));
assertThat(delegateChannel.enterIdleCount).isEqualTo(1);
// Drop the connection
shadowOf(connectivityManager).setActiveNetworkInfo(null);
ApplicationProvider
.getApplicationContext()
.sendBroadcast(new Intent(ConnectivityManager.CONNECTIVITY_ACTION));
assertThat(delegateChannel.enterIdleCount).isEqualTo(1);
// Notify that a new but not connected network is available
shadowOf(connectivityManager).setActiveNetworkInfo(MOBILE_DISCONNECTED);
ApplicationProvider
.getApplicationContext()
.sendBroadcast(new Intent(ConnectivityManager.CONNECTIVITY_ACTION));
assertThat(delegateChannel.enterIdleCount).isEqualTo(1);
// Establish a connection
shadowOf(connectivityManager).setActiveNetworkInfo(MOBILE_CONNECTED);
ApplicationProvider
.getApplicationContext()
.sendBroadcast(new Intent(ConnectivityManager.CONNECTIVITY_ACTION));
assertThat(delegateChannel.enterIdleCount).isEqualTo(2);
// Disconnect, then shutdown the channel and verify that the broadcast receiver has been
// unregistered
shadowOf(connectivityManager).setActiveNetworkInfo(null);
ApplicationProvider
.getApplicationContext()
.sendBroadcast(new Intent(ConnectivityManager.CONNECTIVITY_ACTION));
androidChannel.shutdown();
shadowOf(connectivityManager).setActiveNetworkInfo(MOBILE_CONNECTED);
ApplicationProvider
.getApplicationContext()
.sendBroadcast(new Intent(ConnectivityManager.CONNECTIVITY_ACTION));
assertThat(delegateChannel.enterIdleCount).isEqualTo(2);
}
@Test
@Config(sdk = 24)
public void networkChanges_api24() {
shadowOf(connectivityManager).setActiveNetworkInfo(MOBILE_DISCONNECTED);
TestChannel delegateChannel = new TestChannel();
ManagedChannel androidChannel =
new AndroidChannelBuilder.AndroidChannel(
delegateChannel, ApplicationProvider.getApplicationContext());
assertThat(delegateChannel.enterIdleCount).isEqualTo(0);
// Establish an initial network connection
shadowOf(connectivityManager).setActiveNetworkInfo(MOBILE_CONNECTED);
assertThat(delegateChannel.enterIdleCount).isEqualTo(1);
// Switch to another network to trigger enterIdle()
shadowOf(connectivityManager).setActiveNetworkInfo(WIFI_CONNECTED);
assertThat(delegateChannel.enterIdleCount).isEqualTo(2);
// Switch to an offline network and then to null
shadowOf(connectivityManager).setActiveNetworkInfo(WIFI_DISCONNECTED);
shadowOf(connectivityManager).setActiveNetworkInfo(null);
assertThat(delegateChannel.enterIdleCount).isEqualTo(2);
// Establish a connection
shadowOf(connectivityManager).setActiveNetworkInfo(MOBILE_CONNECTED);
assertThat(delegateChannel.enterIdleCount).isEqualTo(3);
// Disconnect, then shutdown the channel and verify that the callback has been unregistered
shadowOf(connectivityManager).setActiveNetworkInfo(null);
androidChannel.shutdown();
shadowOf(connectivityManager).setActiveNetworkInfo(MOBILE_CONNECTED);
assertThat(delegateChannel.enterIdleCount).isEqualTo(3);
}
@Test
@Config(sdk = 23)
public void shutdownNowUnregistersBroadcastReceiver_api23() {
TestChannel delegateChannel = new TestChannel();
ManagedChannel androidChannel =
new AndroidChannelBuilder.AndroidChannel(
delegateChannel, ApplicationProvider.getApplicationContext());
shadowOf(connectivityManager).setActiveNetworkInfo(null);
ApplicationProvider
.getApplicationContext()
.sendBroadcast(new Intent(ConnectivityManager.CONNECTIVITY_ACTION));
androidChannel.shutdownNow();
shadowOf(connectivityManager).setActiveNetworkInfo(WIFI_CONNECTED);
ApplicationProvider
.getApplicationContext()
.sendBroadcast(new Intent(ConnectivityManager.CONNECTIVITY_ACTION));
assertThat(delegateChannel.enterIdleCount).isEqualTo(0);
}
@Test
@Config(sdk = 24)
public void shutdownNowUnregistersNetworkCallback_api24() {
shadowOf(connectivityManager).setActiveNetworkInfo(null);
TestChannel delegateChannel = new TestChannel();
ManagedChannel androidChannel =
new AndroidChannelBuilder.AndroidChannel(
delegateChannel, ApplicationProvider.getApplicationContext());
androidChannel.shutdownNow();
shadowOf(connectivityManager).setActiveNetworkInfo(WIFI_CONNECTED);
assertThat(delegateChannel.enterIdleCount).isEqualTo(0);
}
/**
* Extends Robolectric ShadowConnectivityManager to handle Android N's
* registerDefaultNetworkCallback API.
*/
@Implements(value = ConnectivityManager.class)
public static
|
AndroidChannelBuilderTest
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/AbstractJUnit4InitMethodNotRun.java
|
{
"start": 1749,
"end": 2033
}
|
class ____ JUnit4SetUp/TearDown not run. This will take care of the nitty-gritty about
* replacing @After with @Before, adding @Before on unannotated methods, making them public if
* necessary, fixing the imports of other @Before, etc.
*
* @author glorioso@google.com
*/
abstract
|
for
|
java
|
apache__camel
|
components/camel-ftp/src/generated/java/org/apache/camel/component/file/remote/FtpComponentConfigurer.java
|
{
"start": 738,
"end": 3494
}
|
class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
FtpComponent target = (FtpComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "healthcheckconsumerenabled":
case "healthCheckConsumerEnabled": target.setHealthCheckConsumerEnabled(property(camelContext, boolean.class, value)); return true;
case "healthcheckproducerenabled":
case "healthCheckProducerEnabled": target.setHealthCheckProducerEnabled(property(camelContext, boolean.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "healthcheckconsumerenabled":
case "healthCheckConsumerEnabled": return boolean.class;
case "healthcheckproducerenabled":
case "healthCheckProducerEnabled": return boolean.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
FtpComponent target = (FtpComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "healthcheckconsumerenabled":
case "healthCheckConsumerEnabled": return target.isHealthCheckConsumerEnabled();
case "healthcheckproducerenabled":
case "healthCheckProducerEnabled": return target.isHealthCheckProducerEnabled();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
|
FtpComponentConfigurer
|
java
|
quarkusio__quarkus
|
integration-tests/jsonb/src/test/java/io/quarkus/it/jsonb/ModelWithSerializerAndDeserializerOnFieldResourceTest.java
|
{
"start": 372,
"end": 1343
}
|
class ____ {
@Test
public void testSerializer() throws IOException {
given()
.contentType("application/json")
.when().get("/fieldserder/tester/whatever")
.then()
.statusCode(200)
.body("name", equalTo("tester"))
.body("inner.someValue", equalTo("unchangeable"));
}
@Test
public void testDeserializer() throws IOException {
Jsonb jsonb = JsonbBuilder.create();
given()
.contentType("application/json")
.body(jsonb.toJson(
new ModelWithSerializerAndDeserializerOnField("tester",
new ModelWithSerializerAndDeserializerOnField.Inner())))
.when().post("/fieldserder")
.then()
.statusCode(200)
.body(is("tester/immutable"));
}
}
|
ModelWithSerializerAndDeserializerOnFieldResourceTest
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/engine/discovery/DiscoverySelectorsTests.java
|
{
"start": 64285,
"end": 65675
}
|
class ____ {
@Test
void selectsUniqueId() {
var selector = selectUniqueId(uniqueIdForMethod(DiscoverySelectorsTests.class, "myTest(int)"));
assertThat(selector.getUniqueId()).isNotNull();
assertThat(parseIdentifier(selector)).isEqualTo(selector);
}
}
// -------------------------------------------------------------------------
private static DiscoverySelector parseIdentifier(DiscoverySelector selector) {
return DiscoverySelectors.parse(toIdentifierString(selector)).orElseThrow();
}
private static Stream<? extends DiscoverySelector> parseIdentifiers(
Collection<? extends DiscoverySelector> selectors) {
return DiscoverySelectors.parseAll(
selectors.stream().map(it -> DiscoverySelectorIdentifier.parse(toIdentifierString(it))).toList());
}
private static String toIdentifierString(DiscoverySelector selector) {
return selector.toIdentifier().orElseThrow().toString();
}
private static String fqmn(Class<?>... params) {
return fqmn(DiscoverySelectorsTests.class, "myTest", params);
}
private static String fqmn(Class<?> clazz, String methodName, Class<?>... params) {
return ReflectionUtils.getFullyQualifiedMethodName(clazz, methodName, params);
}
private static String fqmnWithParamNames(String... params) {
return "%s#%s(%s)".formatted(DiscoverySelectorsTests.class.getName(), "myTest", join(", ", params));
}
|
SelectUniqueIdTests
|
java
|
redisson__redisson
|
redisson/src/test/java/org/redisson/spring/transaction/RedissonTransactionContextConfig.java
|
{
"start": 420,
"end": 1049
}
|
class ____ implements DisposableBean {
@Bean
public TransactionalBean2 transactionBean2() {
return new TransactionalBean2();
}
@Bean
public TransactionalBean transactionBean() {
return new TransactionalBean();
}
@Bean
public RedissonTransactionManager transactionManager(RedissonClient redisson) {
return new RedissonTransactionManager(redisson);
}
@Bean
public RedissonClient redisson() {
return RedisDockerTest.createInstance();
}
public void destroy() {
redisson().shutdown();
}
}
|
RedissonTransactionContextConfig
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java
|
{
"start": 3307,
"end": 61338
}
|
class ____ extends ESSingleNodeTestCase {
private static final String NON_EXISTING_CONNECTOR_ID = "non-existing-connector-id";
private static final String NON_EXISTING_SYNC_JOB_ID = "non-existing-sync-job-id";
private static final String LAST_SEEN_FIELD_NAME = ConnectorSyncJob.LAST_SEEN_FIELD.getPreferredName();
private static final int TIMEOUT_SECONDS = 10;
private static final int ONE_SECOND_IN_MILLIS = 1000;
private ConnectorSyncJobIndexService connectorSyncJobIndexService;
private String connectorOneId;
private String connectorTwoId;
private String connectorThreeId;
private String connectorFourId;
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
List<Class<? extends Plugin>> plugins = new ArrayList<>(super.getPlugins());
// Reindex plugin is required for testDeleteAllSyncJobsByConnectorId (supports delete_by_query)
plugins.add(ReindexPlugin.class);
return plugins;
}
@Before
public void setup() throws Exception {
registerSimplifiedConnectorIndexTemplates(indicesAdmin());
connectorOneId = createConnector(ConnectorTestUtils.getRandomConnector());
connectorTwoId = createConnector(ConnectorTestUtils.getRandomConnector());
connectorThreeId = createConnector(ConnectorTestUtils.getRandomConnectorWithDetachedIndex());
connectorFourId = createConnector(ConnectorTestUtils.getRandomConnectorWithServiceTypeNotDefined());
this.connectorSyncJobIndexService = new ConnectorSyncJobIndexService(client());
}
private String createConnector(Connector connector) throws IOException, InterruptedException, ExecutionException, TimeoutException {
final IndexRequest indexRequest = new IndexRequest(ConnectorIndexService.CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX)
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.source(connector.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS));
ActionFuture<DocWriteResponse> index = client().index(indexRequest);
// wait 10 seconds for connector creation
return index.get(TIMEOUT_SECONDS, TimeUnit.SECONDS).getId();
}
public void testCreateConnectorSyncJob() throws Exception {
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
ConnectorSyncJobType requestJobType = syncJobRequest.getJobType();
ConnectorSyncJobTriggerMethod requestTriggerMethod = syncJobRequest.getTriggerMethod();
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
ConnectorSyncJob connectorSyncJob = awaitGetConnectorSyncJob(response.getId());
assertThat(connectorSyncJob.getJobType(), equalTo(requestJobType));
assertThat(connectorSyncJob.getTriggerMethod(), equalTo(requestTriggerMethod));
assertThat(connectorSyncJob.getStatus(), equalTo(ConnectorSyncJob.DEFAULT_INITIAL_STATUS));
assertThat(connectorSyncJob.getCreatedAt(), equalTo(connectorSyncJob.getLastSeen()));
assertThat(connectorSyncJob.getTotalDocumentCount(), equalTo(0L));
assertThat(connectorSyncJob.getIndexedDocumentCount(), equalTo(0L));
assertThat(connectorSyncJob.getIndexedDocumentVolume(), equalTo(0L));
assertThat(connectorSyncJob.getDeletedDocumentCount(), equalTo(0L));
}
public void testCreateConnectorSyncJob_WithAccessControlJobType_IndexIsPrefixed() throws Exception {
PostConnectorSyncJobAction.Request createAccessControlJobRequest = ConnectorSyncJobTestUtils
.getRandomPostConnectorSyncJobActionRequest(connectorOneId, ConnectorSyncJobType.ACCESS_CONTROL);
PostConnectorSyncJobAction.Response createAccessControlJobResponse = awaitPutConnectorSyncJob(createAccessControlJobRequest);
ConnectorSyncJob connectorSyncJob = awaitGetConnectorSyncJob(createAccessControlJobResponse.getId());
assertThat(connectorSyncJob.getJobType(), equalTo(ConnectorSyncJobType.ACCESS_CONTROL));
assertTrue(connectorSyncJob.getConnector().getIndexName().startsWith(ACCESS_CONTROL_INDEX_PREFIX));
}
public void testCreateConnectorSyncJob_WithMissingJobType_ExpectDefaultJobTypeToBeSet() throws Exception {
PostConnectorSyncJobAction.Request syncJobRequest = new PostConnectorSyncJobAction.Request(
connectorOneId,
null,
ConnectorSyncJobTriggerMethod.ON_DEMAND
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
ConnectorSyncJob connectorSyncJob = awaitGetConnectorSyncJob(response.getId());
assertThat(connectorSyncJob.getJobType(), equalTo(ConnectorSyncJob.DEFAULT_JOB_TYPE));
}
public void testCreateConnectorSyncJob_WithMissingTriggerMethod_ExpectDefaultTriggerMethodToBeSet() throws Exception {
PostConnectorSyncJobAction.Request syncJobRequest = new PostConnectorSyncJobAction.Request(
connectorOneId,
ConnectorSyncJobType.FULL,
null
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
ConnectorSyncJob connectorSyncJob = awaitGetConnectorSyncJob(response.getId());
assertThat(connectorSyncJob.getTriggerMethod(), equalTo(ConnectorSyncJob.DEFAULT_TRIGGER_METHOD));
}
public void testCreateConnectorSyncJob_WithMissingConnectorId_ExpectException() throws Exception {
PostConnectorSyncJobAction.Request syncJobRequest = new PostConnectorSyncJobAction.Request(
NON_EXISTING_CONNECTOR_ID,
ConnectorSyncJobType.FULL,
ConnectorSyncJobTriggerMethod.ON_DEMAND
);
awaitPutConnectorSyncJobExpectingException(
syncJobRequest,
ActionListener.wrap(response -> {}, exception -> assertThat(exception.getMessage(), containsString(NON_EXISTING_CONNECTOR_ID)))
);
}
public void testDeleteConnectorSyncJob_WithDetachedConnectorIndex_ExpectException() {
PostConnectorSyncJobAction.Request syncJobRequest = new PostConnectorSyncJobAction.Request(
connectorThreeId,
ConnectorSyncJobType.FULL,
ConnectorSyncJobTriggerMethod.ON_DEMAND
);
expectThrows(ElasticsearchStatusException.class, () -> awaitPutConnectorSyncJob(syncJobRequest));
}
public void testDeleteConnectorSyncJob_WithServiceTypeNotDefined_ExpectException() {
PostConnectorSyncJobAction.Request syncJobRequest = new PostConnectorSyncJobAction.Request(
connectorFourId,
ConnectorSyncJobType.FULL,
ConnectorSyncJobTriggerMethod.ON_DEMAND
);
expectThrows(ElasticsearchStatusException.class, () -> awaitPutConnectorSyncJob(syncJobRequest));
}
public void testDeleteConnectorSyncJob_WithNonExistentConnectorId_ExpectException() {
PostConnectorSyncJobAction.Request syncJobRequest = new PostConnectorSyncJobAction.Request(
"non-existent-connector-id",
ConnectorSyncJobType.FULL,
ConnectorSyncJobTriggerMethod.ON_DEMAND
);
expectThrows(ResourceNotFoundException.class, () -> awaitPutConnectorSyncJob(syncJobRequest));
}
public void testDeleteConnectorSyncJob() throws Exception {
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
assertThat(syncJobId, notNullValue());
DeleteResponse deleteResponse = awaitDeleteConnectorSyncJob(syncJobId);
assertThat(deleteResponse.status(), equalTo(RestStatus.OK));
}
public void testDeleteConnectorSyncJob_WithMissingSyncJobId_ExpectException() {
expectThrows(ResourceNotFoundException.class, () -> awaitDeleteConnectorSyncJob(NON_EXISTING_SYNC_JOB_ID));
}
public void testDeleteAllSyncJobsByConnectorId() throws Exception {
PostConnectorSyncJobAction.Request syncJobRequest = new PostConnectorSyncJobAction.Request(
connectorOneId,
ConnectorSyncJobType.FULL,
ConnectorSyncJobTriggerMethod.ON_DEMAND
);
int numJobs = 5;
// Create 5 jobs associated with connector
for (int i = 0; i < numJobs; i++) {
awaitPutConnectorSyncJob(syncJobRequest);
}
BulkByScrollResponse response = awaitDeleteAllSyncJobsByConnectorId(connectorOneId);
// 5 jobs should be deleted
assertEquals(numJobs, response.getDeleted());
response = awaitDeleteAllSyncJobsByConnectorId(connectorOneId);
// No jobs should be deleted
assertEquals(0, response.getDeleted());
}
public void testDeleteAllSyncJobsByConnectorId_NonExistentConnector() throws Exception {
BulkByScrollResponse response = awaitDeleteAllSyncJobsByConnectorId("non-existent-connector");
// 0 jobs should be deleted
assertEquals(0, response.getDeleted());
}
public void testGetConnectorSyncJob() throws Exception {
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
ConnectorSyncJobType jobType = syncJobRequest.getJobType();
ConnectorSyncJobTriggerMethod triggerMethod = syncJobRequest.getTriggerMethod();
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
ConnectorSyncJob syncJob = awaitGetConnectorSyncJob(syncJobId);
assertThat(syncJob.getId(), equalTo(syncJobId));
assertThat(syncJob.getJobType(), equalTo(jobType));
assertThat(syncJob.getTriggerMethod(), equalTo(triggerMethod));
assertThat(syncJob.getConnector().getConnectorId(), equalTo(connectorOneId));
}
public void testGetConnectorSyncJob_WithMissingSyncJobId_ExpectException() {
expectThrows(ResourceNotFoundException.class, () -> awaitGetConnectorSyncJob(NON_EXISTING_SYNC_JOB_ID));
}
public void testCheckInConnectorSyncJob() throws Exception {
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
Map<String, Object> syncJobSourceBeforeUpdate = getConnectorSyncJobSourceById(syncJobId);
Instant lastSeenBeforeUpdate = Instant.parse((String) syncJobSourceBeforeUpdate.get(LAST_SEEN_FIELD_NAME));
safeSleep(ONE_SECOND_IN_MILLIS);
UpdateResponse updateResponse = awaitCheckInConnectorSyncJob(syncJobId);
Map<String, Object> syncJobSourceAfterUpdate = getConnectorSyncJobSourceById(syncJobId);
Instant lastSeenAfterUpdate = Instant.parse((String) syncJobSourceAfterUpdate.get(LAST_SEEN_FIELD_NAME));
long secondsBetweenLastSeenBeforeAndAfterUpdate = ChronoUnit.SECONDS.between(lastSeenBeforeUpdate, lastSeenAfterUpdate);
assertThat("Wrong sync job was updated", syncJobId, equalTo(updateResponse.getId()));
assertThat(updateResponse.status(), equalTo(RestStatus.OK));
assertTrue(
"[" + LAST_SEEN_FIELD_NAME + "] after the check in is not after [" + LAST_SEEN_FIELD_NAME + "] before the check in",
lastSeenAfterUpdate.isAfter(lastSeenBeforeUpdate)
);
assertThat(
"there must be at least one second between ["
+ LAST_SEEN_FIELD_NAME
+ "] after the check in and ["
+ LAST_SEEN_FIELD_NAME
+ "] before the check in",
secondsBetweenLastSeenBeforeAndAfterUpdate,
greaterThanOrEqualTo(1L)
);
assertFieldsExceptLastSeenDidNotUpdate(syncJobSourceBeforeUpdate, syncJobSourceAfterUpdate);
}
public void testCheckInConnectorSyncJob_WithMissingSyncJobId_ExpectException() {
expectThrows(ResourceNotFoundException.class, () -> awaitCheckInConnectorSyncJob(NON_EXISTING_SYNC_JOB_ID));
}
public void testCancelConnectorSyncJob_WithStatusInProgress_ExpectNextStatusCanceling() throws Exception {
// Create connector sync job
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
Map<String, Object> syncJobSourceBeforeUpdate = getConnectorSyncJobSourceById(syncJobId);
ConnectorSyncStatus syncStatusBeforeUpdate = ConnectorSyncStatus.fromString(
(String) syncJobSourceBeforeUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName())
);
Object cancellationRequestedAtBeforeUpdate = syncJobSourceBeforeUpdate.get(
ConnectorSyncJob.CANCELATION_REQUESTED_AT_FIELD.getPreferredName()
);
assertThat(syncJobId, notNullValue());
assertThat(cancellationRequestedAtBeforeUpdate, nullValue());
assertThat(syncStatusBeforeUpdate, not(equalTo(ConnectorSyncStatus.CANCELING)));
// Set sync job status to `in_progress`
updateConnectorSyncJobStatusWithoutStateMachineGuard(syncJobId, ConnectorSyncStatus.IN_PROGRESS);
// Cancel sync job
UpdateResponse updateResponse = awaitCancelConnectorSyncJob(syncJobId);
Map<String, Object> syncJobSourceAfterUpdate = getConnectorSyncJobSourceById(syncJobId);
ConnectorSyncStatus syncStatusAfterUpdate = ConnectorSyncStatus.fromString(
(String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName())
);
Instant cancellationRequestedAtAfterUpdate = Instant.parse(
(String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.CANCELATION_REQUESTED_AT_FIELD.getPreferredName())
);
assertThat(updateResponse.status(), equalTo(RestStatus.OK));
assertThat(cancellationRequestedAtAfterUpdate, notNullValue());
assertThat(syncStatusAfterUpdate, equalTo(ConnectorSyncStatus.CANCELING));
assertFieldsExceptSyncStatusAndCancellationRequestedAtDidNotUpdate(syncJobSourceBeforeUpdate, syncJobSourceAfterUpdate);
}
public void testCancelConnectorSyncJob_WithPendingState_ExpectNextStatusCanceled() throws Exception {
// Create pending sync job
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
Map<String, Object> syncJobSourceBeforeUpdate = getConnectorSyncJobSourceById(syncJobId);
ConnectorSyncStatus syncStatusBeforeUpdate = ConnectorSyncStatus.fromString(
(String) syncJobSourceBeforeUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName())
);
Object canceledAtBeforeUpdate = syncJobSourceBeforeUpdate.get(ConnectorSyncJob.CANCELED_AT_FIELD.getPreferredName());
assertThat(syncJobId, notNullValue());
assertThat(canceledAtBeforeUpdate, nullValue());
assertThat(syncStatusBeforeUpdate, not(equalTo(ConnectorSyncStatus.CANCELED)));
// Cancel sync job
UpdateResponse updateResponse = awaitCancelConnectorSyncJob(syncJobId);
Map<String, Object> syncJobSourceAfterUpdate = getConnectorSyncJobSourceById(syncJobId);
ConnectorSyncStatus syncStatusAfterUpdate = ConnectorSyncStatus.fromString(
(String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName())
);
Instant canceledAtAfterUpdate = Instant.parse(
(String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.CANCELED_AT_FIELD.getPreferredName())
);
assertThat(updateResponse.status(), equalTo(RestStatus.OK));
assertThat(canceledAtAfterUpdate, notNullValue());
assertThat(syncStatusAfterUpdate, equalTo(ConnectorSyncStatus.CANCELED));
assertFieldsExceptSyncStatusAndCanceledAndCompletedTimestampsDidNotUpdate(syncJobSourceBeforeUpdate, syncJobSourceAfterUpdate);
}
public void testCancelConnectorSyncJob_WithSuspendedState_ExpectNextStatusCanceled() throws Exception {
// Create pending sync job
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
Map<String, Object> syncJobSourceBeforeUpdate = getConnectorSyncJobSourceById(syncJobId);
ConnectorSyncStatus syncStatusBeforeUpdate = ConnectorSyncStatus.fromString(
(String) syncJobSourceBeforeUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName())
);
Object canceledAtBeforeUpdate = syncJobSourceBeforeUpdate.get(ConnectorSyncJob.CANCELED_AT_FIELD.getPreferredName());
assertThat(syncJobId, notNullValue());
assertThat(canceledAtBeforeUpdate, nullValue());
assertThat(syncStatusBeforeUpdate, not(equalTo(ConnectorSyncStatus.CANCELED)));
// Set sync job to suspended
updateConnectorSyncJobStatusWithoutStateMachineGuard(syncJobId, ConnectorSyncStatus.SUSPENDED);
// Cancel sync job
UpdateResponse updateResponse = awaitCancelConnectorSyncJob(syncJobId);
Map<String, Object> syncJobSourceAfterUpdate = getConnectorSyncJobSourceById(syncJobId);
ConnectorSyncStatus syncStatusAfterUpdate = ConnectorSyncStatus.fromString(
(String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName())
);
Instant canceledAtAfterUpdate = Instant.parse(
(String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.CANCELED_AT_FIELD.getPreferredName())
);
assertThat(updateResponse.status(), equalTo(RestStatus.OK));
assertThat(canceledAtAfterUpdate, notNullValue());
assertThat(syncStatusAfterUpdate, equalTo(ConnectorSyncStatus.CANCELED));
assertFieldsExceptSyncStatusAndCanceledAndCompletedTimestampsDidNotUpdate(syncJobSourceBeforeUpdate, syncJobSourceAfterUpdate);
}
public void testCancelConnectorSyncJob_WithCompletedState_ExpectStatusException() throws Exception {
// Create sync job
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
Map<String, Object> syncJobSourceBeforeUpdate = getConnectorSyncJobSourceById(syncJobId);
ConnectorSyncStatus syncStatusBeforeUpdate = ConnectorSyncStatus.fromString(
(String) syncJobSourceBeforeUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName())
);
Object cancellationRequestedAtBeforeUpdate = syncJobSourceBeforeUpdate.get(
ConnectorSyncJob.CANCELATION_REQUESTED_AT_FIELD.getPreferredName()
);
assertThat(syncJobId, notNullValue());
assertThat(cancellationRequestedAtBeforeUpdate, nullValue());
assertThat(syncStatusBeforeUpdate, not(equalTo(ConnectorSyncStatus.CANCELING)));
// Set sync job status to `completed`
updateConnectorSyncJobStatusWithoutStateMachineGuard(syncJobId, ConnectorSyncStatus.COMPLETED);
// Cancel sync job
assertThrows(ElasticsearchStatusException.class, () -> awaitCancelConnectorSyncJob(syncJobId));
}
public void testCancelConnectorSyncJob_WithMissingSyncJobId_ExpectException() {
expectThrows(ResourceNotFoundException.class, () -> awaitCancelConnectorSyncJob(NON_EXISTING_SYNC_JOB_ID));
}
public void testListConnectorSyncJobs() throws Exception {
int numberOfSyncJobs = 5;
List<ConnectorSyncJob> syncJobs = new ArrayList<>();
for (int i = 0; i < numberOfSyncJobs; i++) {
PostConnectorSyncJobAction.Request request = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(request);
ConnectorSyncJob syncJob = awaitGetConnectorSyncJob(response.getId());
syncJobs.add(syncJob);
}
ConnectorSyncJobIndexService.ConnectorSyncJobsResult firstTwoSyncJobs = awaitListConnectorSyncJobs(0, 2, null, null, null);
ConnectorSyncJobIndexService.ConnectorSyncJobsResult nextTwoSyncJobs = awaitListConnectorSyncJobs(2, 2, null, null, null);
ConnectorSyncJobIndexService.ConnectorSyncJobsResult lastSyncJobs = awaitListConnectorSyncJobs(4, 100, null, null, null);
ConnectorSyncJob firstSyncJob = ConnectorSyncJob.fromXContentBytes(
firstTwoSyncJobs.connectorSyncJobs().get(0).getSourceRef(),
firstTwoSyncJobs.connectorSyncJobs().get(0).getDocId(),
XContentType.JSON
);
ConnectorSyncJob secondSyncJob = ConnectorSyncJob.fromXContentBytes(
firstTwoSyncJobs.connectorSyncJobs().get(1).getSourceRef(),
firstTwoSyncJobs.connectorSyncJobs().get(1).getDocId(),
XContentType.JSON
);
ConnectorSyncJob thirdSyncJob = ConnectorSyncJob.fromXContentBytes(
nextTwoSyncJobs.connectorSyncJobs().get(0).getSourceRef(),
nextTwoSyncJobs.connectorSyncJobs().get(0).getDocId(),
XContentType.JSON
);
ConnectorSyncJob fourthSyncJob = ConnectorSyncJob.fromXContentBytes(
nextTwoSyncJobs.connectorSyncJobs().get(1).getSourceRef(),
nextTwoSyncJobs.connectorSyncJobs().get(1).getDocId(),
XContentType.JSON
);
ConnectorSyncJob fifthSyncJob = ConnectorSyncJob.fromXContentBytes(
lastSyncJobs.connectorSyncJobs().get(0).getSourceRef(),
lastSyncJobs.connectorSyncJobs().get(0).getDocId(),
XContentType.JSON
);
assertThat(firstTwoSyncJobs.connectorSyncJobs().size(), equalTo(2));
assertThat(firstTwoSyncJobs.totalResults(), equalTo(5L));
assertThat(nextTwoSyncJobs.connectorSyncJobs().size(), equalTo(2));
assertThat(nextTwoSyncJobs.totalResults(), equalTo(5L));
assertThat(lastSyncJobs.connectorSyncJobs().size(), equalTo(1));
assertThat(lastSyncJobs.totalResults(), equalTo(5L));
// Sync jobs are returned in most-recently created order
assertThat(firstSyncJob, equalTo(syncJobs.get(4)));
assertThat(secondSyncJob, equalTo(syncJobs.get(3)));
assertThat(thirdSyncJob, equalTo(syncJobs.get(2)));
assertThat(fourthSyncJob, equalTo(syncJobs.get(1)));
assertThat(fifthSyncJob, equalTo(syncJobs.get(0)));
// assert ordering: descending order by creation date
assertTrue(fourthSyncJob.getCreatedAt().isAfter(fifthSyncJob.getCreatedAt()));
assertTrue(thirdSyncJob.getCreatedAt().isAfter(fourthSyncJob.getCreatedAt()));
assertTrue(secondSyncJob.getCreatedAt().isAfter(thirdSyncJob.getCreatedAt()));
assertTrue(firstSyncJob.getCreatedAt().isAfter(secondSyncJob.getCreatedAt()));
}
public void testListConnectorSyncJobs_WithStatusPending_GivenOnePendingTwoCanceled_ExpectOnePending() throws Exception {
PostConnectorSyncJobAction.Request requestOne = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Request requestTwo = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Request requestThree = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response responseOne = awaitPutConnectorSyncJob(requestOne);
PostConnectorSyncJobAction.Response responseTwo = awaitPutConnectorSyncJob(requestTwo);
PostConnectorSyncJobAction.Response responseThree = awaitPutConnectorSyncJob(requestThree);
String syncJobOneId = responseOne.getId();
String syncJobTwoId = responseTwo.getId();
String syncJobThreeId = responseThree.getId();
// cancel sync job two and three -> one pending left
awaitCancelConnectorSyncJob(syncJobTwoId);
awaitCancelConnectorSyncJob(syncJobThreeId);
ConnectorSyncJobIndexService.ConnectorSyncJobsResult connectorSyncJobsResult = awaitListConnectorSyncJobs(
0,
100,
null,
ConnectorSyncStatus.PENDING,
null
);
long numberOfResults = connectorSyncJobsResult.totalResults();
String idOfReturnedSyncJob = connectorSyncJobsResult.connectorSyncJobs().get(0).getDocId();
assertThat(numberOfResults, equalTo(1L));
assertThat(idOfReturnedSyncJob, equalTo(syncJobOneId));
}
public void testListConnectorSyncJobs_WithConnectorOneId_GivenTwoOverallOneFromConnectorOne_ExpectOne() throws Exception {
PostConnectorSyncJobAction.Request requestOne = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Request requestTwo = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorTwoId
);
awaitPutConnectorSyncJob(requestOne);
awaitPutConnectorSyncJob(requestTwo);
ConnectorSyncJobIndexService.ConnectorSyncJobsResult connectorSyncJobsResult = awaitListConnectorSyncJobs(
0,
100,
connectorOneId,
null,
null
);
long numberOfResults = connectorSyncJobsResult.totalResults();
String connectorIdOfReturnedSyncJob = ConnectorSyncJob.fromXContentBytes(
connectorSyncJobsResult.connectorSyncJobs().get(0).getSourceRef(),
connectorSyncJobsResult.connectorSyncJobs().get(0).getDocId(),
XContentType.JSON
).getConnector().getConnectorId();
assertThat(numberOfResults, equalTo(1L));
assertThat(connectorIdOfReturnedSyncJob, equalTo(connectorOneId));
}
public void testListConnectorSyncJobs_WithJobTypeFull_GivenOnePerType_ExpectOneFull() throws Exception {
PostConnectorSyncJobAction.Request requestOne = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId,
ConnectorSyncJobType.FULL
);
PostConnectorSyncJobAction.Request requestTwo = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId,
ConnectorSyncJobType.INCREMENTAL
);
PostConnectorSyncJobAction.Request requestThree = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId,
ConnectorSyncJobType.ACCESS_CONTROL
);
PostConnectorSyncJobAction.Response responseOne = awaitPutConnectorSyncJob(requestOne);
awaitPutConnectorSyncJob(requestTwo);
awaitPutConnectorSyncJob(requestThree);
String syncJobOneId = responseOne.getId();
ConnectorSyncJobIndexService.ConnectorSyncJobsResult connectorSyncJobsResult = awaitListConnectorSyncJobs(
0,
100,
null,
null,
Collections.singletonList(ConnectorSyncJobType.FULL)
);
long numberOfResults = connectorSyncJobsResult.totalResults();
String idOfReturnedSyncJob = connectorSyncJobsResult.connectorSyncJobs().get(0).getDocId();
assertThat(numberOfResults, equalTo(1L));
assertThat(idOfReturnedSyncJob, equalTo(syncJobOneId));
}
public void testListConnectorSyncJobs_WithJobTypeIncremental_GivenOnePerType_ExpectOneIncremental() throws Exception {
PostConnectorSyncJobAction.Request requestOne = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId,
ConnectorSyncJobType.FULL
);
PostConnectorSyncJobAction.Request requestTwo = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId,
ConnectorSyncJobType.INCREMENTAL
);
PostConnectorSyncJobAction.Request requestThree = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId,
ConnectorSyncJobType.ACCESS_CONTROL
);
awaitPutConnectorSyncJob(requestOne);
PostConnectorSyncJobAction.Response responseTwo = awaitPutConnectorSyncJob(requestTwo);
awaitPutConnectorSyncJob(requestThree);
String syncJobTwoId = responseTwo.getId();
ConnectorSyncJobIndexService.ConnectorSyncJobsResult connectorSyncJobsResult = awaitListConnectorSyncJobs(
0,
100,
null,
null,
Collections.singletonList(ConnectorSyncJobType.INCREMENTAL)
);
long numberOfResults = connectorSyncJobsResult.totalResults();
String idOfReturnedSyncJob = connectorSyncJobsResult.connectorSyncJobs().get(0).getDocId();
assertThat(numberOfResults, equalTo(1L));
assertThat(idOfReturnedSyncJob, equalTo(syncJobTwoId));
}
/**
 * Creates one sync job of each job type and verifies that filtering the listing by
 * {@code ACCESS_CONTROL} returns exactly the single access-control job.
 */
public void testListConnectorSyncJobs_WithJobTypeAccessControl_GivenOnePerType_ExpectOneAccessControl() throws Exception {
// One sync job request per job type, all on the same connector.
PostConnectorSyncJobAction.Request requestOne = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId,
ConnectorSyncJobType.FULL
);
PostConnectorSyncJobAction.Request requestTwo = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId,
ConnectorSyncJobType.INCREMENTAL
);
PostConnectorSyncJobAction.Request requestThree = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId,
ConnectorSyncJobType.ACCESS_CONTROL
);
awaitPutConnectorSyncJob(requestOne);
awaitPutConnectorSyncJob(requestTwo);
// Only the access-control job's id is needed for the assertion below.
PostConnectorSyncJobAction.Response responseThree = awaitPutConnectorSyncJob(requestThree);
String syncJobThreeId = responseThree.getId();
// List with a job-type filter of ACCESS_CONTROL only (no connector id / status filter).
ConnectorSyncJobIndexService.ConnectorSyncJobsResult connectorSyncJobsResult = awaitListConnectorSyncJobs(
0,
100,
null,
null,
Collections.singletonList(ConnectorSyncJobType.ACCESS_CONTROL)
);
long numberOfResults = connectorSyncJobsResult.totalResults();
String idOfReturnedSyncJob = connectorSyncJobsResult.connectorSyncJobs().get(0).getDocId();
assertThat(numberOfResults, equalTo(1L));
assertThat(idOfReturnedSyncJob, equalTo(syncJobThreeId));
}
/**
 * Creates one sync job of each job type and verifies that filtering the listing by
 * both {@code FULL} and {@code INCREMENTAL} returns those two jobs (and not the
 * access-control one), ordered most-recently-created first.
 */
public void testListConnectorSyncJobs_WithJobTypeFullAndIncremental_GivenOnePerType_ExpectOneFullOneIncremental() throws Exception {
// One sync job request per job type, all on the same connector.
PostConnectorSyncJobAction.Request requestOne = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId,
ConnectorSyncJobType.FULL
);
PostConnectorSyncJobAction.Request requestTwo = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId,
ConnectorSyncJobType.INCREMENTAL
);
PostConnectorSyncJobAction.Request requestThree = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId,
ConnectorSyncJobType.ACCESS_CONTROL
);
PostConnectorSyncJobAction.Response responseOne = awaitPutConnectorSyncJob(requestOne);
PostConnectorSyncJobAction.Response responseTwo = awaitPutConnectorSyncJob(requestTwo);
awaitPutConnectorSyncJob(requestThree);
String syncJobOneId = responseOne.getId();
String syncJobTwoId = responseTwo.getId();
// List with a job-type filter matching FULL and INCREMENTAL.
ConnectorSyncJobIndexService.ConnectorSyncJobsResult connectorSyncJobsResult = awaitListConnectorSyncJobs(
0,
100,
null,
null,
Arrays.asList(ConnectorSyncJobType.FULL, ConnectorSyncJobType.INCREMENTAL)
);
long numberOfResults = connectorSyncJobsResult.totalResults();
String idOfReturnedSyncJobOne = connectorSyncJobsResult.connectorSyncJobs().get(0).getDocId();
String idOfReturnedSyncJobTwo = connectorSyncJobsResult.connectorSyncJobs().get(1).getDocId();
assertThat(numberOfResults, equalTo(2L));
// Sync jobs are returned in most-recently created order
assertThat(idOfReturnedSyncJobTwo, equalTo(syncJobOneId));
assertThat(idOfReturnedSyncJobOne, equalTo(syncJobTwoId));
}
/** Listing sync jobs against an empty index must return zero hits and a zero total. */
public void testListConnectorSyncJobs_WithNoSyncJobs_ReturnEmptyResult() throws Exception {
    ConnectorSyncJobIndexService.ConnectorSyncJobsResult emptyResult = awaitListConnectorSyncJobs(0, 100, null, null, null);
    assertThat(emptyResult.totalResults(), equalTo(0L));
    assertThat(emptyResult.connectorSyncJobs().size(), equalTo(0));
}
/**
 * Verifies that setting an error on an in-progress sync job persists the error text
 * and transitions the job status to {@code ERROR}.
 */
public void testUpdateConnectorSyncJobError() throws Exception {
// Create sync job
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
// Set sync job to in progress
updateConnectorSyncJobStatusWithoutStateMachineGuard(syncJobId, ConnectorSyncStatus.IN_PROGRESS);
// Set sync job error
UpdateConnectorSyncJobErrorAction.Request request = ConnectorSyncJobTestUtils.getRandomUpdateConnectorSyncJobErrorActionRequest();
String errorInRequest = request.getError();
UpdateResponse updateResponse = awaitUpdateConnectorSyncJob(syncJobId, errorInRequest);
// Re-read the document to inspect the persisted error and status.
Map<String, Object> connectorSyncJobSource = getConnectorSyncJobSourceById(syncJobId);
String error = (String) connectorSyncJobSource.get(ConnectorSyncJob.ERROR_FIELD.getPreferredName());
ConnectorSyncStatus syncStatus = ConnectorSyncStatus.fromString(
(String) connectorSyncJobSource.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName())
);
assertThat(updateResponse.status(), equalTo(RestStatus.OK));
assertThat(error, equalTo(errorInRequest));
assertThat(syncStatus, equalTo(ConnectorSyncStatus.ERROR));
}
/**
 * Updating the error of a non-existing sync job must fail with {@link ResourceNotFoundException}.
 *
 * <p>Renamed from {@code ..._ExceptException} (typo) to match the
 * {@code ..._ExpectException} naming used by the other negative tests in this class.
 */
public void testUpdateConnectorSyncJobError_WithMissingSyncJobId_ExpectException() {
    expectThrows(
        ResourceNotFoundException.class,
        () -> awaitUpdateConnectorSyncJob(NON_EXISTING_SYNC_JOB_ID, randomAlphaOfLengthBetween(5, 100))
    );
}
/**
 * Setting an error on a sync job that is still {@code PENDING} (never moved to in-progress)
 * must be rejected with an {@link ElasticsearchStatusException} by the status state machine.
 */
public void testUpdateConnectorSyncJobError_WithStatusPending_ExpectStatusException() throws Exception {
// Create sync job
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
// Try to set error
assertThrows(ElasticsearchStatusException.class, () -> awaitUpdateConnectorSyncJob(syncJobId, "some error"));
}
@SuppressWarnings("unchecked")
/**
 * Verifies that an ingestion-stats update persists every stat field (document counts,
 * volume, last-seen timestamp, metadata) and leaves all other fields untouched.
 */
public void testUpdateConnectorSyncJobIngestionStats() throws Exception {
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
// Snapshot the document before the update so unchanged fields can be compared afterwards.
Map<String, Object> syncJobSourceBeforeUpdate = getConnectorSyncJobSourceById(syncJobId);
UpdateConnectorSyncJobIngestionStatsAction.Request request = ConnectorSyncJobTestUtils
.getRandomUpdateConnectorSyncJobIngestionStatsActionRequest(syncJobId);
UpdateResponse updateResponse = awaitUpdateConnectorSyncJobIngestionStats(request);
Map<String, Object> syncJobSourceAfterUpdate = getConnectorSyncJobSourceById(syncJobId);
// Values sent in the request ...
Long requestDeletedDocumentCount = request.getDeletedDocumentCount();
Long requestIndexedDocumentCount = request.getIndexedDocumentCount();
Long requestIndexedDocumentVolume = request.getIndexedDocumentVolume();
Long requestTotalDocumentCount = request.getTotalDocumentCount();
Instant requestLastSeen = request.getLastSeen();
Map<String, Object> metadata = request.getMetadata();
// ... and the values actually persisted in the index.
Long deletedDocumentCountAfterUpdate = (Long) syncJobSourceAfterUpdate.get(
ConnectorSyncJob.DELETED_DOCUMENT_COUNT_FIELD.getPreferredName()
);
Long indexedDocumentCountAfterUpdate = (Long) syncJobSourceAfterUpdate.get(
ConnectorSyncJob.INDEXED_DOCUMENT_COUNT_FIELD.getPreferredName()
);
Long indexedDocumentVolumeAfterUpdate = (Long) syncJobSourceAfterUpdate.get(
ConnectorSyncJob.INDEXED_DOCUMENT_VOLUME_FIELD.getPreferredName()
);
Long totalDocumentCountAfterUpdate = (Long) syncJobSourceAfterUpdate.get(
ConnectorSyncJob.TOTAL_DOCUMENT_COUNT_FIELD.getPreferredName()
);
Instant lastSeenAfterUpdate = Instant.parse(
(String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.LAST_SEEN_FIELD.getPreferredName())
);
Map<String, Object> metadataAfterUpdate = (Map<String, Object>) syncJobSourceAfterUpdate.get(
ConnectorSyncJob.METADATA_FIELD.getPreferredName()
);
assertThat(updateResponse.status(), equalTo(RestStatus.OK));
assertThat(deletedDocumentCountAfterUpdate, equalTo(requestDeletedDocumentCount));
assertThat(indexedDocumentCountAfterUpdate, equalTo(requestIndexedDocumentCount));
assertThat(indexedDocumentVolumeAfterUpdate, equalTo(requestIndexedDocumentVolume));
assertThat(totalDocumentCountAfterUpdate, equalTo(requestTotalDocumentCount));
assertThat(lastSeenAfterUpdate, equalTo(requestLastSeen));
assertThat(metadataAfterUpdate, equalTo(metadata));
assertFieldsExceptAllIngestionStatsDidNotUpdate(syncJobSourceBeforeUpdate, syncJobSourceAfterUpdate);
}
/**
 * When an ingestion-stats request omits {@code lastSeen}, the service must set the
 * field to a fresh timestamp (i.e. strictly after the previous value).
 */
public void testUpdateConnectorSyncJobIngestionStats_WithoutLastSeen_ExpectUpdateOfLastSeen() throws Exception {
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
Map<String, Object> syncJobSourceBeforeUpdate = getConnectorSyncJobSourceById(syncJobId);
Instant lastSeenBeforeUpdate = Instant.parse(
(String) syncJobSourceBeforeUpdate.get(ConnectorSyncJob.LAST_SEEN_FIELD.getPreferredName())
);
// Request with explicit null lastSeen (and null metadata).
UpdateConnectorSyncJobIngestionStatsAction.Request request = new UpdateConnectorSyncJobIngestionStatsAction.Request(
syncJobId,
10L,
20L,
100L,
10L,
null,
null
);
// Sleep so the service-generated lastSeen is measurably later than the original one.
safeSleep(ONE_SECOND_IN_MILLIS);
UpdateResponse updateResponse = awaitUpdateConnectorSyncJobIngestionStats(request);
Map<String, Object> syncJobSourceAfterUpdate = getConnectorSyncJobSourceById(syncJobId);
Instant lastSeenAfterUpdate = Instant.parse(
(String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.LAST_SEEN_FIELD.getPreferredName())
);
long secondsBetweenLastSeenBeforeAndAfterUpdate = ChronoUnit.SECONDS.between(lastSeenBeforeUpdate, lastSeenAfterUpdate);
assertThat(updateResponse.status(), equalTo(RestStatus.OK));
assertTrue(lastSeenAfterUpdate.isAfter(lastSeenBeforeUpdate));
assertThat(secondsBetweenLastSeenBeforeAndAfterUpdate, greaterThanOrEqualTo(1L));
assertFieldsExceptAllIngestionStatsDidNotUpdate(syncJobSourceBeforeUpdate, syncJobSourceAfterUpdate);
}
/**
 * An ingestion-stats update for a non-existing sync job must fail with
 * {@link ResourceNotFoundException}.
 */
public void testUpdateConnectorSyncJobIngestionStats_WithMissingSyncJobId_ExpectException() {
expectThrows(
ResourceNotFoundException.class,
() -> awaitUpdateConnectorSyncJobIngestionStats(
new UpdateConnectorSyncJobIngestionStatsAction.Request(NON_EXISTING_SYNC_JOB_ID, 0L, 0L, 0L, 0L, Instant.now(), null)
)
);
}
/** A null filtering list must map to a null sync-job filtering representation. */
public void testTransformConnectorFilteringToSyncJobRepresentation_WithFilteringEqualNull() {
    final List<ConnectorFiltering> nullFiltering = null;
    assertNull(connectorSyncJobIndexService.transformConnectorFilteringToSyncJobRepresentation(nullFiltering));
}
/** An empty filtering list must map to a null sync-job filtering representation. */
public void testTransformConnectorFilteringToSyncJobRepresentation_WithFilteringEmpty() {
    final List<ConnectorFiltering> emptyFiltering = List.of();
    assertNull(connectorSyncJobIndexService.transformConnectorFilteringToSyncJobRepresentation(emptyFiltering));
}
/**
 * With a non-empty filtering list, the sync-job representation must be the active
 * filtering of the FIRST entry.
 */
public void testTransformConnectorFilteringToSyncJobRepresentation_WithFilteringRules() {
    ConnectorFiltering filtering1 = ConnectorTestUtils.getRandomConnectorFiltering();
    List<ConnectorFiltering> filtering = List.of(filtering1, ConnectorTestUtils.getRandomConnectorFiltering());
    // assertEquals takes (expected, actual); the original had them reversed, which
    // produces misleading failure messages. Pass/fail behavior is unchanged.
    assertEquals(filtering1.getActive(), connectorSyncJobIndexService.transformConnectorFilteringToSyncJobRepresentation(filtering));
}
/**
 * Claiming a pending sync job must set the sync cursor on the embedded connector,
 * transition the job from {@code PENDING} to {@code IN_PROGRESS}, record the worker
 * hostname / last-seen, and leave every other field untouched.
 */
public void testClaimConnectorSyncJob() throws Exception {
// Create sync job
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
Map<String, Object> syncJobSourceBeforeUpdate = getConnectorSyncJobSourceById(syncJobId);
@SuppressWarnings("unchecked")
Map<String, Object> syncJobConnectorBeforeUpdate = (Map<String, Object>) syncJobSourceBeforeUpdate.get(
ConnectorSyncJob.CONNECTOR_FIELD.getPreferredName()
);
// Claim sync job
// Random worker hostname and a single-entry random sync cursor.
ClaimConnectorSyncJobAction.Request claimRequest = new ClaimConnectorSyncJobAction.Request(
syncJobId,
randomAlphaOfLengthBetween(5, 100),
Map.of(randomAlphaOfLengthBetween(5, 100), randomAlphaOfLengthBetween(5, 100))
);
UpdateResponse claimResponse = awaitClaimConnectorSyncJob(claimRequest);
Map<String, Object> syncJobSourceAfterUpdate = getConnectorSyncJobSourceById(syncJobId);
@SuppressWarnings("unchecked")
Map<String, Object> syncJobConnectorAfterUpdate = (Map<String, Object>) syncJobSourceAfterUpdate.get(
ConnectorSyncJob.CONNECTOR_FIELD.getPreferredName()
);
assertThat(claimResponse.status(), equalTo(RestStatus.OK));
// The cursor from the claim request must now be stored on the embedded connector.
assertThat(syncJobConnectorAfterUpdate.get("sync_cursor"), equalTo(claimRequest.getSyncCursor()));
assertFieldsDidNotUpdateExceptFieldList(
syncJobConnectorBeforeUpdate,
syncJobConnectorAfterUpdate,
List.of(Connector.SYNC_CURSOR_FIELD)
);
// Status transition PENDING -> IN_PROGRESS.
assertThat(
syncJobSourceBeforeUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName()),
equalTo(ConnectorSyncStatus.PENDING.toString())
);
assertThat(
syncJobSourceAfterUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName()),
equalTo(ConnectorSyncStatus.IN_PROGRESS.toString())
);
assertFieldsDidNotUpdateExceptFieldList(
syncJobSourceBeforeUpdate,
syncJobSourceAfterUpdate,
List.of(
ConnectorSyncJob.STATUS_FIELD,
ConnectorSyncJob.CONNECTOR_FIELD,
ConnectorSyncJob.LAST_SEEN_FIELD,
ConnectorSyncJob.WORKER_HOSTNAME_FIELD
)
);
}
/**
 * Claiming a non-existing sync job must fail with {@link ResourceNotFoundException}.
 */
public void testClaimConnectorSyncJob_WithMissingSyncJobId_ExpectException() {
expectThrows(
ResourceNotFoundException.class,
() -> awaitClaimConnectorSyncJob(
new ClaimConnectorSyncJobAction.Request(NON_EXISTING_SYNC_JOB_ID, randomAlphaOfLengthBetween(5, 100), Map.of())
)
);
}
/**
 * Claiming with a null sync cursor must still move the job to {@code IN_PROGRESS}
 * (updating last-seen and worker hostname) but leave the embedded connector,
 * including its cursor, completely unchanged.
 */
public void testClaimConnectorSyncJob_WithMissingSyncCursor() throws Exception {
PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(
connectorOneId
);
PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest);
String syncJobId = response.getId();
Map<String, Object> syncJobSourceBeforeUpdate = getConnectorSyncJobSourceById(syncJobId);
@SuppressWarnings("unchecked")
Map<String, Object> syncJobConnectorBeforeUpdate = (Map<String, Object>) syncJobSourceBeforeUpdate.get(
ConnectorSyncJob.CONNECTOR_FIELD.getPreferredName()
);
// Claim sync job
// Null cursor: the claim must not touch the connector's sync_cursor.
ClaimConnectorSyncJobAction.Request claimRequest = new ClaimConnectorSyncJobAction.Request(
syncJobId,
randomAlphaOfLengthBetween(5, 100),
null
);
UpdateResponse claimResponse = awaitClaimConnectorSyncJob(claimRequest);
Map<String, Object> syncJobSourceAfterUpdate = getConnectorSyncJobSourceById(syncJobId);
@SuppressWarnings("unchecked")
Map<String, Object> syncJobConnectorAfterUpdate = (Map<String, Object>) syncJobSourceAfterUpdate.get(
ConnectorSyncJob.CONNECTOR_FIELD.getPreferredName()
);
assertThat(claimResponse.status(), equalTo(RestStatus.OK));
assertThat(syncJobConnectorAfterUpdate.get("sync_cursor"), nullValue());
assertThat(syncJobConnectorBeforeUpdate, equalTo(syncJobConnectorAfterUpdate));
assertFieldsDidNotUpdateExceptFieldList(
syncJobSourceBeforeUpdate,
syncJobSourceAfterUpdate,
List.of(ConnectorSyncJob.STATUS_FIELD, ConnectorSyncJob.LAST_SEEN_FIELD, ConnectorSyncJob.WORKER_HOSTNAME_FIELD)
);
// Status transition PENDING -> IN_PROGRESS.
assertThat(
syncJobSourceBeforeUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName()),
equalTo(ConnectorSyncStatus.PENDING.toString())
);
assertThat(
syncJobSourceAfterUpdate.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName()),
equalTo(ConnectorSyncStatus.IN_PROGRESS.toString())
);
}
/**
 * Synchronously claims a sync job via the async index-service API.
 * Blocks up to {@code TIMEOUT_SECONDS}; rethrows any service failure and asserts a
 * non-null response before returning it.
 */
private UpdateResponse awaitClaimConnectorSyncJob(ClaimConnectorSyncJobAction.Request request) throws Exception {
CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<UpdateResponse> resp = new AtomicReference<>(null);
final AtomicReference<Exception> exc = new AtomicReference<>(null);
connectorSyncJobIndexService.claimConnectorSyncJob(
request.getConnectorSyncJobId(),
request.getWorkerHostname(),
request.getSyncCursor(),
new ActionListener<>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
resp.set(updateResponse);
latch.countDown();
}
@Override
public void onFailure(Exception e) {
exc.set(e);
latch.countDown();
}
}
);
assertTrue("Timeout waiting for claim request", latch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
// Surface the async failure (if any) on the test thread.
if (exc.get() != null) {
throw exc.get();
}
assertNotNull("Received null response from claim request", resp.get());
return resp.get();
}
/**
 * Synchronously applies an ingestion-stats update via the async index-service API.
 * Blocks up to {@code TIMEOUT_SECONDS}; rethrows any service failure and asserts a
 * non-null response before returning it.
 */
private UpdateResponse awaitUpdateConnectorSyncJobIngestionStats(UpdateConnectorSyncJobIngestionStatsAction.Request request)
throws Exception {
CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<UpdateResponse> resp = new AtomicReference<>(null);
final AtomicReference<Exception> exc = new AtomicReference<>(null);
connectorSyncJobIndexService.updateConnectorSyncJobIngestionStats(request, new ActionListener<>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
resp.set(updateResponse);
latch.countDown();
}
@Override
public void onFailure(Exception e) {
exc.set(e);
latch.countDown();
}
});
assertTrue("Timeout waiting for update request", latch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
// Surface the async failure (if any) on the test thread.
if (exc.get() != null) {
throw exc.get();
}
assertNotNull("Received null response from update request", resp.get());
return resp.get();
}
/**
 * Asserts that only the ingestion-stats fields (document counts, volume, last-seen,
 * metadata) changed between the two source snapshots.
 */
private static void assertFieldsExceptAllIngestionStatsDidNotUpdate(
Map<String, Object> syncJobSourceBeforeUpdate,
Map<String, Object> syncJobSourceAfterUpdate
) {
assertFieldsDidNotUpdateExceptFieldList(
syncJobSourceBeforeUpdate,
syncJobSourceAfterUpdate,
List.of(
ConnectorSyncJob.DELETED_DOCUMENT_COUNT_FIELD,
ConnectorSyncJob.INDEXED_DOCUMENT_COUNT_FIELD,
ConnectorSyncJob.INDEXED_DOCUMENT_VOLUME_FIELD,
ConnectorSyncJob.TOTAL_DOCUMENT_COUNT_FIELD,
ConnectorSyncJob.LAST_SEEN_FIELD,
ConnectorSyncJob.METADATA_FIELD
)
);
}
/**
 * Asserts that only the status and cancelation-requested-at fields changed between
 * the two source snapshots.
 */
private static void assertFieldsExceptSyncStatusAndCancellationRequestedAtDidNotUpdate(
Map<String, Object> syncJobSourceBeforeUpdate,
Map<String, Object> syncJobSourceAfterUpdate
) {
assertFieldsDidNotUpdateExceptFieldList(
syncJobSourceBeforeUpdate,
syncJobSourceAfterUpdate,
List.of(ConnectorSyncJob.STATUS_FIELD, ConnectorSyncJob.CANCELATION_REQUESTED_AT_FIELD)
);
}
/**
 * Asserts that only the status, canceled-at, completed-at and cancelation-requested-at
 * fields changed between the two source snapshots.
 */
private static void assertFieldsExceptSyncStatusAndCanceledAndCompletedTimestampsDidNotUpdate(
Map<String, Object> syncJobSourceBeforeUpdate,
Map<String, Object> syncJobSourceAfterUpdate
) {
assertFieldsDidNotUpdateExceptFieldList(
syncJobSourceBeforeUpdate,
syncJobSourceAfterUpdate,
List.of(
ConnectorSyncJob.STATUS_FIELD,
ConnectorSyncJob.CANCELED_AT_FIELD,
ConnectorSyncJob.COMPLETED_AT_FIELD,
ConnectorSyncJob.CANCELATION_REQUESTED_AT_FIELD
)
);
}
/**
 * Asserts that only the last-seen field changed between the two source snapshots.
 */
private static void assertFieldsExceptLastSeenDidNotUpdate(
Map<String, Object> syncJobSourceBeforeUpdate,
Map<String, Object> syncJobSourceAfterUpdate
) {
assertFieldsDidNotUpdateExceptFieldList(
syncJobSourceBeforeUpdate,
syncJobSourceAfterUpdate,
List.of(ConnectorSyncJob.LAST_SEEN_FIELD)
);
}
/**
 * Asserts that every field present in the before-update snapshot still has the same
 * value in the after-update snapshot, except for the fields explicitly allowed to change.
 */
private static void assertFieldsDidNotUpdateExceptFieldList(
    Map<String, Object> syncJobSourceBeforeUpdate,
    Map<String, Object> syncJobSourceAfterUpdate,
    List<ParseField> fieldsWhichShouldUpdate
) {
    // Resolve the allow-list of field names that are permitted to differ.
    Set<String> fieldsNamesWhichShouldUpdate = fieldsWhichShouldUpdate.stream()
        .map(ParseField::getPreferredName)
        .collect(Collectors.toSet());
    syncJobSourceBeforeUpdate.forEach((fieldName, valueBeforeUpdate) -> {
        if (fieldsNamesWhichShouldUpdate.contains(fieldName)) {
            return; // this field is expected to change; skip it
        }
        assertThat(
            "Every field except ["
                + String.join(",", fieldsNamesWhichShouldUpdate)
                + "] should stay the same. ["
                + fieldName
                + "] did change.",
            valueBeforeUpdate,
            equalTo(syncJobSourceAfterUpdate.get(fieldName))
        );
    });
}
/**
 * Synchronously lists sync jobs via the async index-service API with the given paging
 * and optional connector-id / status / job-type filters (null means "no filter").
 * Blocks up to {@code TIMEOUT_SECONDS}; rethrows any service failure and asserts a
 * non-null result before returning it.
 */
private ConnectorSyncJobIndexService.ConnectorSyncJobsResult awaitListConnectorSyncJobs(
int from,
int size,
String connectorId,
ConnectorSyncStatus syncStatus,
List<ConnectorSyncJobType> jobTypeList
) throws Exception {
CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<ConnectorSyncJobIndexService.ConnectorSyncJobsResult> result = new AtomicReference<>(null);
final AtomicReference<Exception> exc = new AtomicReference<>(null);
connectorSyncJobIndexService.listConnectorSyncJobs(from, size, connectorId, syncStatus, jobTypeList, new ActionListener<>() {
@Override
public void onResponse(ConnectorSyncJobIndexService.ConnectorSyncJobsResult connectorSyncJobsResult) {
result.set(connectorSyncJobsResult);
latch.countDown();
}
@Override
public void onFailure(Exception e) {
exc.set(e);
latch.countDown();
}
});
assertTrue("Timeout waiting for list request", latch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
// Surface the async failure (if any) on the test thread.
if (exc.get() != null) {
throw exc.get();
}
assertNotNull("Received null response from list request", result.get());
return result.get();
}
/**
 * Synchronously sets the error on a sync job via the async index-service API.
 * Blocks up to {@code TIMEOUT_SECONDS}; rethrows any service failure and asserts a
 * non-null response before returning it.
 */
private UpdateResponse awaitUpdateConnectorSyncJob(String syncJobId, String error) throws Exception {
CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<UpdateResponse> resp = new AtomicReference<>(null);
final AtomicReference<Exception> exc = new AtomicReference<>(null);
connectorSyncJobIndexService.updateConnectorSyncJobError(syncJobId, error, new ActionListener<>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
resp.set(updateResponse);
latch.countDown();
}
@Override
public void onFailure(Exception e) {
exc.set(e);
latch.countDown();
}
});
assertTrue("Timeout waiting for update request", latch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
// Surface the async failure (if any) on the test thread.
if (exc.get() != null) {
throw exc.get();
}
assertNotNull("Received null response from update request", resp.get());
return resp.get();
}
/**
 * Synchronously cancels a sync job via the async index-service API.
 * Blocks up to {@code TIMEOUT_SECONDS}; rethrows any service failure and asserts a
 * non-null response before returning it.
 */
private UpdateResponse awaitCancelConnectorSyncJob(String syncJobId) throws Exception {
    final CountDownLatch done = new CountDownLatch(1);
    final AtomicReference<UpdateResponse> responseRef = new AtomicReference<>(null);
    final AtomicReference<Exception> failureRef = new AtomicReference<>(null);
    connectorSyncJobIndexService.cancelConnectorSyncJob(syncJobId, new ActionListener<>() {
        @Override
        public void onResponse(UpdateResponse updateResponse) {
            responseRef.set(updateResponse);
            done.countDown();
        }

        @Override
        public void onFailure(Exception e) {
            failureRef.set(e);
            done.countDown();
        }
    });
    assertTrue("Timeout waiting for cancel request", done.await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
    // Surface the async failure (if any) on the test thread.
    final Exception failure = failureRef.get();
    if (failure != null) {
        throw failure;
    }
    final UpdateResponse cancelResponse = responseRef.get();
    assertNotNull("Received null response from cancel request", cancelResponse);
    return cancelResponse;
}
/**
 * Fetches the raw source document of the given sync job directly from the
 * sync-jobs index, waiting at most {@code TIMEOUT_SECONDS} for the GET to complete.
 */
private Map<String, Object> getConnectorSyncJobSourceById(String syncJobId) throws ExecutionException, InterruptedException,
    TimeoutException {
    final GetRequest request = new GetRequest(ConnectorSyncJobIndexService.CONNECTOR_SYNC_JOB_INDEX_NAME, syncJobId);
    final GetResponse getResponse = client().get(request).get(TIMEOUT_SECONDS, TimeUnit.SECONDS);
    return getResponse.getSource();
}
private ConnectorSyncJob awaitGetConnectorSyncJob(String connectorSyncJobId) throws Exception {
CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<ConnectorSyncJob> resp = new AtomicReference<>(null);
final AtomicReference<Exception> exc = new AtomicReference<>(null);
connectorSyncJobIndexService.getConnectorSyncJob(connectorSyncJobId, new ActionListener<ConnectorSyncJobSearchResult>() {
@Override
public void onResponse(ConnectorSyncJobSearchResult searchResult) {
// Serialize the sourceRef to ConnectorSyncJob
|
ConnectorSyncJobIndexServiceTests
|
java
|
grpc__grpc-java
|
rls/src/test/java/io/grpc/rls/LinkedHashLruCacheTest.java
|
{
"start": 9988,
"end": 10886
}
|
class ____ extends LinkedHashLruCache<Integer, Entry> {
private boolean allowEviction = false;
TestFitToLimitEviction(
long estimatedMaxSizeBytes,
@Nullable EvictionListener<Integer, Entry> evictionListener,
Ticker ticker) {
super(estimatedMaxSizeBytes, evictionListener, ticker);
}
@Override
protected boolean isExpired(Integer key, Entry value, long nowNanos) {
return value.expireTime - nowNanos <= 0;
}
@Override
protected int estimateSizeOf(Integer key, Entry value) {
return value.size;
}
@Override
protected boolean shouldInvalidateEldestEntry(Integer eldestKey, Entry eldestValue, long now) {
return allowEviction && super.shouldInvalidateEldestEntry(eldestKey, eldestValue, now);
}
public void enableEviction() {
allowEviction = true;
}
}
}
|
TestFitToLimitEviction
|
java
|
playframework__playframework
|
core/play/src/main/java/play/mvc/Http.java
|
{
"start": 57915,
"end": 60073
}
|
class ____ {
private final play.api.mvc.Session underlying;
public Session() {
this.underlying = new play.api.mvc.Session(Scala.asScala(Collections.emptyMap()));
}
public Session(Map<String, String> data) {
this.underlying = new play.api.mvc.Session(Scala.asScala(data));
}
public Session(play.api.mvc.Session underlying) {
this.underlying = underlying;
}
public Map<String, String> data() {
return Scala.asJava(this.underlying.data());
}
/** Optionally returns the session value associated with a key. */
public Optional<String> get(String key) {
return OptionConverters.toJava(this.underlying.get(key));
}
/**
* Optionally returns the session value associated with a key.
*
* @deprecated Deprecated as of 2.8.0. Renamed to {@link #get(String)}.
*/
@Deprecated
public Optional<String> getOptional(String key) {
return get(key);
}
/**
* Optionally returns the session value associated with a key.
*
* @deprecated Deprecated as of 2.8.0. Use {@link #get(String)} instead.
*/
@Deprecated
public Optional<String> apply(String key) {
return get(key);
}
/** Returns a new session with the given keys removed. */
public Session removing(String... keys) {
return this.underlying.$minus$minus(Scala.varargs(keys)).asJava();
}
/** Returns a new session with the given key-value pair added. */
public Session adding(String key, String value) {
return this.underlying.$plus(Scala.Tuple(key, value)).asJava();
}
/** Returns a new session with the values from the given map added. */
public Session adding(Map<String, String> values) {
return this.underlying.$plus$plus(Scala.asScala(values)).asJava();
}
/**
* Convert this session to a Scala session.
*
* @return the Scala session.
*/
public play.api.mvc.Session asScala() {
return this.underlying;
}
}
/**
* HTTP Flash.
*
* <p>Flash data are encoded into an HTTP cookie, and can only contain simple String values.
*/
public static
|
Session
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseNotFoundException.java
|
{
"start": 679,
"end": 1026
}
|
class ____ extends ResourceNotFoundException {
public RetentionLeaseNotFoundException(final String id) {
super("retention lease with ID [" + Objects.requireNonNull(id) + "] not found");
}
public RetentionLeaseNotFoundException(final StreamInput in) throws IOException {
super(in);
}
}
|
RetentionLeaseNotFoundException
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryLongToIntConversionTest.java
|
{
"start": 11180,
"end": 11666
}
|
class ____ {
void acceptsLong(long value) {}
void foo() {
Long x = Long.valueOf(1);
acceptsLong(x);
}
}
""")
.setFixChooser(FIRST)
.doTest(TEXT_MATCH);
}
@Test
public void suggestReplacingStaticMethodWithConstrainToRange() {
refactoringHelper
.addInputLines(
"in/A.java",
"""
import java.lang.Math;
public
|
A
|
java
|
grpc__grpc-java
|
servlet/src/test/java/io/grpc/servlet/ServletServerBuilderTest.java
|
{
"start": 1475,
"end": 4116
}
|
class ____ {
@Test
public void scheduledExecutorService() throws Exception {
ScheduledExecutorService scheduler = mock(ScheduledExecutorService.class);
HttpServletRequest request = mock(HttpServletRequest.class);
HttpServletResponse response = mock(HttpServletResponse.class);
AsyncContext asyncContext = mock(AsyncContext.class);
ServletInputStream inputStream = mock(ServletInputStream.class);
ServletOutputStream outputStream = mock(ServletOutputStream.class);
ScheduledFuture<?> future = mock(ScheduledFuture.class);
doReturn(future).when(scheduler).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class));
doReturn(true).when(request).isAsyncSupported();
doReturn(asyncContext).when(request).startAsync(request, response);
doReturn("application/grpc").when(request).getContentType();
doReturn("/hello/world").when(request).getRequestURI();
@SuppressWarnings({"JdkObsolete", "unchecked"}) // Required by servlet API signatures.
// StringTokenizer is actually Enumeration<String>
Enumeration<String> headerNames =
(Enumeration<String>) ((Enumeration<?>) new StringTokenizer("grpc-timeout"));
@SuppressWarnings({"JdkObsolete", "unchecked"})
Enumeration<String> headers =
(Enumeration<String>) ((Enumeration<?>) new StringTokenizer("1m"));
doReturn(headerNames).when(request).getHeaderNames();
doReturn(headers).when(request).getHeaders("grpc-timeout");
doReturn(new StringBuffer("localhost:8080")).when(request).getRequestURL();
doReturn(inputStream).when(request).getInputStream();
doReturn("1.1.1.1").when(request).getLocalAddr();
doReturn(8080).when(request).getLocalPort();
doReturn("remote").when(request).getRemoteHost();
doReturn(80).when(request).getRemotePort();
doReturn(outputStream).when(response).getOutputStream();
doReturn(request).when(asyncContext).getRequest();
doReturn(response).when(asyncContext).getResponse();
ServletServerBuilder serverBuilder =
new ServletServerBuilder().scheduledExecutorService(scheduler);
ServletAdapter servletAdapter = serverBuilder.buildServletAdapter();
servletAdapter.doPost(request, response);
verify(asyncContext).setTimeout(1 + ServletAdapter.ASYNC_TIMEOUT_SAFETY_MARGIN);
// The following just verifies that scheduler is populated to the transport.
// It doesn't matter what tasks (such as handshake timeout and request deadline) are actually
// scheduled.
verify(scheduler, timeout(5000).atLeastOnce())
.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class));
}
}
|
ServletServerBuilderTest
|
java
|
quarkusio__quarkus
|
extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/security/permission/HttpSecPolicyGrantingPermissionsLazyAuthTest.java
|
{
"start": 549,
"end": 1240
}
|
class ____ extends AbstractHttpSecurityPolicyGrantingPermissionsTest {
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest().setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)
.addClasses(TestIdentityController.class, TestIdentityProvider.class, PermissionsPathHandler.class,
CDIBean.class, CustomPermission.class, CustomPermissionWithActions.class)
.addAsResource("conf/http-permission-grant-config.properties", "application.properties")
.addAsResource(new StringAsset("quarkus.http.auth.proactive=false\n"), "META-INF/microprofile-config.properties"));
}
|
HttpSecPolicyGrantingPermissionsLazyAuthTest
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/annotation/AnnotationUtilsTests.java
|
{
"start": 50745,
"end": 50841
}
|
class ____ extends InheritedAnnotationClass {
}
@Order
public static
|
SubInheritedAnnotationClass
|
java
|
google__guava
|
android/guava/src/com/google/common/cache/CacheBuilder.java
|
{
"start": 29206,
"end": 30595
}
|
class ____.
*
* @return this {@code CacheBuilder} instance (for chaining)
* @throws IllegalStateException if the key strength was already set
*/
@GwtIncompatible // java.lang.ref.WeakReference
@CanIgnoreReturnValue
public CacheBuilder<K, V> weakKeys() {
return setKeyStrength(Strength.WEAK);
}
@CanIgnoreReturnValue
CacheBuilder<K, V> setKeyStrength(Strength strength) {
checkState(keyStrength == null, "Key strength was already set to %s", keyStrength);
keyStrength = checkNotNull(strength);
return this;
}
Strength getKeyStrength() {
return MoreObjects.firstNonNull(keyStrength, Strength.STRONG);
}
/**
* Specifies that each value (not key) stored in the cache should be wrapped in a {@link
* WeakReference} (by default, strong references are used).
*
* <p>Weak values will be garbage collected once they are weakly reachable. This makes them a poor
* candidate for caching; consider {@link #softValues} instead.
*
* <p><b>Note:</b> when this method is used, the resulting cache will use identity ({@code ==})
* comparison to determine equality of values.
*
* <p>Entries with values that have been garbage collected may be counted in {@link Cache#size},
* but will never be visible to read or write operations; such entries are cleaned up as part of
* the routine maintenance described in the
|
javadoc
|
java
|
google__dagger
|
javatests/dagger/functional/assisted/AssistedFactoryBindsTest.java
|
{
"start": 1857,
"end": 1894
}
|
class ____ {}
static final
|
AssistedDep
|
java
|
apache__camel
|
components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/CamelMultipartFilter.java
|
{
"start": 971,
"end": 1111
}
|
class ____ extends CamelFilterWrapper {
public CamelMultipartFilter(Filter wrapped) {
super(wrapped);
}
}
|
CamelMultipartFilter
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionMessage.java
|
{
"start": 6245,
"end": 9901
}
|
class ____ {
private final String condition;
private Builder(String condition) {
this.condition = condition;
}
/**
* Indicate that an exact result was found. For example
* {@code foundExactly("foo")} results in the message "found foo".
* @param result the result that was found
* @return a built {@link ConditionMessage}
*/
public ConditionMessage foundExactly(Object result) {
return found("").items(result);
}
/**
* Indicate that one or more results were found. For example
* {@code found("bean").items("x")} results in the message "found bean x".
* @param article the article found
* @return an {@link ItemsBuilder}
*/
public ItemsBuilder found(String article) {
return found(article, article);
}
/**
* Indicate that one or more results were found. For example
* {@code found("bean", "beans").items("x", "y")} results in the message "found
* beans x, y".
* @param singular the article found in singular form
* @param plural the article found in plural form
* @return an {@link ItemsBuilder}
*/
public ItemsBuilder found(String singular, String plural) {
return new ItemsBuilder(this, "found", singular, plural);
}
/**
* Indicate that one or more results were not found. For example
* {@code didNotFind("bean").items("x")} results in the message "did not find bean
* x".
* @param article the article found
* @return an {@link ItemsBuilder}
*/
public ItemsBuilder didNotFind(String article) {
return didNotFind(article, article);
}
/**
* Indicate that one or more results were found. For example
* {@code didNotFind("bean", "beans").items("x", "y")} results in the message "did
* not find beans x, y".
* @param singular the article found in singular form
* @param plural the article found in plural form
* @return an {@link ItemsBuilder}
*/
public ItemsBuilder didNotFind(String singular, String plural) {
return new ItemsBuilder(this, "did not find", singular, plural);
}
/**
* Indicates a single result. For example {@code resultedIn("yes")} results in the
* message "resulted in yes".
* @param result the result
* @return a built {@link ConditionMessage}
*/
public ConditionMessage resultedIn(Object result) {
return because("resulted in " + result);
}
/**
* Indicates something is available. For example {@code available("money")}
* results in the message "money is available".
* @param item the item that is available
* @return a built {@link ConditionMessage}
*/
public ConditionMessage available(String item) {
return because(item + " is available");
}
/**
* Indicates something is not available. For example {@code notAvailable("time")}
* results in the message "time is not available".
* @param item the item that is not available
* @return a built {@link ConditionMessage}
*/
public ConditionMessage notAvailable(String item) {
return because(item + " is not available");
}
/**
* Indicates the reason. For example {@code because("running Linux")} results in
* the message "running Linux".
* @param reason the reason for the message
* @return a built {@link ConditionMessage}
*/
public ConditionMessage because(@Nullable String reason) {
if (StringUtils.hasLength(reason)) {
return new ConditionMessage(ConditionMessage.this,
StringUtils.hasLength(this.condition) ? this.condition + " " + reason : reason);
}
return new ConditionMessage(ConditionMessage.this, this.condition);
}
}
/**
* Builder used to create an {@link ItemsBuilder} for a condition.
*/
public final
|
Builder
|
java
|
spring-projects__spring-security
|
web/src/main/java/org/springframework/security/web/server/authentication/RedirectServerAuthenticationFailureHandler.java
|
{
"start": 1173,
"end": 2219
}
|
class ____ implements ServerAuthenticationFailureHandler {
private final URI location;
private ServerRedirectStrategy redirectStrategy = new DefaultServerRedirectStrategy();
/**
* Creates an instance
* @param location the location to redirect to (i.e. "/login?failed")
*/
public RedirectServerAuthenticationFailureHandler(String location) {
Assert.notNull(location, "location cannot be null");
this.location = URI.create(location);
}
/**
* Sets the RedirectStrategy to use.
* @param redirectStrategy the strategy to use. Default is DefaultRedirectStrategy.
*/
public void setRedirectStrategy(ServerRedirectStrategy redirectStrategy) {
Assert.notNull(redirectStrategy, "redirectStrategy cannot be null");
this.redirectStrategy = redirectStrategy;
}
@Override
public Mono<Void> onAuthenticationFailure(WebFilterExchange webFilterExchange, AuthenticationException exception) {
return this.redirectStrategy.sendRedirect(webFilterExchange.getExchange(), this.location);
}
}
|
RedirectServerAuthenticationFailureHandler
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/ConcurrentBeanWrapperTests.java
|
{
"start": 1185,
"end": 2777
}
|
class ____ {
private final Log logger = LogFactory.getLog(getClass());
private final Set<TestRun> set = ConcurrentHashMap.newKeySet();
private Throwable ex = null;
@RepeatedTest(100)
void testSingleThread() {
performSet();
}
@Test
void testConcurrent() {
for (int i = 0; i < 10; i++) {
TestRun run = new TestRun(this);
set.add(run);
Thread t = new Thread(run);
t.setDaemon(true);
t.start();
}
logger.info("Thread creation over, " + set.size() + " still active.");
synchronized (this) {
while (!set.isEmpty() && ex == null) {
try {
wait();
}
catch (InterruptedException e) {
logger.info(e.toString());
}
logger.info(set.size() + " threads still active.");
}
}
if (ex != null) {
throw new AssertionError("Unexpected exception", ex);
}
}
private static void performSet() {
TestBean bean = new TestBean();
Properties p = (Properties) System.getProperties().clone();
assertThat(p).as("The System properties must not be empty").isNotEmpty();
for (Iterator<?> i = p.entrySet().iterator(); i.hasNext();) {
i.next();
if (Math.random() > 0.9) {
i.remove();
}
}
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try {
p.store(buffer, null);
}
catch (IOException e) {
// ByteArrayOutputStream does not throw
// any IOException
}
String value = buffer.toString();
BeanWrapperImpl wrapper = new BeanWrapperImpl(bean);
wrapper.setPropertyValue("properties", value);
assertThat(bean.getProperties()).isEqualTo(p);
}
private static
|
ConcurrentBeanWrapperTests
|
java
|
junit-team__junit5
|
junit-jupiter-api/src/main/java/org/junit/jupiter/api/extension/PreInterruptCallback.java
|
{
"start": 1020,
"end": 2070
}
|
interface ____ extends Extension {
/**
* Property name used to enable dumping the stack of all
* {@linkplain Thread threads} to {@code System.out} when a timeout has occurred.
*
* <p>This behavior is disabled by default.
*
* @since 5.12
*/
@API(status = MAINTAINED, since = "5.13.3")
String THREAD_DUMP_ENABLED_PROPERTY_NAME = "junit.jupiter.execution.timeout.threaddump.enabled";
/**
* Callback that is invoked <em>before</em> a {@link Thread} is interrupted with
* {@link Thread#interrupt()}.
*
* <p>Note: There is no guarantee on which {@link Thread} this callback will be
* executed.
*
* @param preInterruptContext the context with the target {@link Thread}, which will get interrupted.
* @param extensionContext the extension context for the callback; never {@code null}
* @since 5.12
* @see PreInterruptContext
*/
@API(status = MAINTAINED, since = "5.13.3")
void beforeThreadInterrupt(PreInterruptContext preInterruptContext, ExtensionContext extensionContext)
throws Exception;
}
|
PreInterruptCallback
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/jpa/HibernatePersistenceConfiguration.java
|
{
"start": 6084,
"end": 22522
}
|
class ____. This setting is ignored when Hibernate is configured
* to obtain connections from a {@link javax.sql.DataSource}.
*
* @see #JDBC_DRIVER
*/
public HibernatePersistenceConfiguration jdbcDriver(String driverName) {
property( JDBC_DRIVER, driverName );
return this;
}
/**
* JDBC URL. This setting is ignored when Hibernate is configured to obtain
* connections from a {@link javax.sql.DataSource}.
*
* @see #JDBC_URL
*/
public HibernatePersistenceConfiguration jdbcUrl(String url) {
property( JDBC_URL, url );
return this;
}
/**
* Username for JDBC authentication.
*
* @see #JDBC_USER
* @see #jdbcPassword
* @see java.sql.DriverManager#getConnection(String, String, String)
* @see javax.sql.DataSource#getConnection(String, String)
*/
public HibernatePersistenceConfiguration jdbcUsername(String username) {
property( JDBC_USER, username );
return this;
}
/**
* Password for JDBC authentication.
*
* @see #JDBC_PASSWORD
* @see #jdbcUsername
* @see java.sql.DriverManager#getConnection(String, String, String)
* @see javax.sql.DataSource#getConnection(String, String)
*/
public HibernatePersistenceConfiguration jdbcPassword(String password) {
property( JDBC_PASSWORD, password );
return this;
}
/**
* Username and password for JDBC authentication.
*
* @see #JDBC_USER
* @see #JDBC_PASSWORD
* @see #jdbcUsername
* @see #jdbcPassword
* @see java.sql.DriverManager#getConnection(String, String, String)
* @see javax.sql.DataSource#getConnection(String, String)
*/
public HibernatePersistenceConfiguration jdbcCredentials(String username, String password) {
jdbcUsername( username );
jdbcPassword( password );
return this;
}
/**
* The JDBC connection pool size. This setting is ignored when Hibernate is
* configured to obtain connections from a {@link javax.sql.DataSource}.
*
* @see JdbcSettings#POOL_SIZE
*/
public HibernatePersistenceConfiguration jdbcPoolSize(int poolSize) {
property( JdbcSettings.POOL_SIZE, poolSize );
return this;
}
/**
* The JDBC {@linkplain java.sql.Connection#setAutoCommit autocommit mode}
* for pooled connections. This setting is ignored when Hibernate is
* configured to obtain connections from a {@link javax.sql.DataSource}.
*
* @see JdbcSettings#AUTOCOMMIT
*/
public HibernatePersistenceConfiguration jdbcAutocommit(boolean autocommit) {
property( JdbcSettings.AUTOCOMMIT, autocommit );
return this;
}
/**
* The JDBC {@linkplain java.sql.Connection#setTransactionIsolation transaction
* isolation level}. This setting is ignored when Hibernate is configured to
* obtain connections from a {@link javax.sql.DataSource}.
* <p>
* Possible values are enumerated by {@link java.sql.Connection}:
* {@link java.sql.Connection#TRANSACTION_READ_UNCOMMITTED},
* {@link java.sql.Connection#TRANSACTION_READ_COMMITTED},
* {@link java.sql.Connection#TRANSACTION_REPEATABLE_READ}, and
* {@link java.sql.Connection#TRANSACTION_SERIALIZABLE}.
*
* @see JdbcSettings#ISOLATION
*/
public HibernatePersistenceConfiguration jdbcTransactionIsolation(int isolationLevel) {
property( JdbcSettings.ISOLATION, isolationLevel );
return this;
}
/**
* Enables SQL logging to the console.
* <p>
* Sets {@value AvailableSettings#SHOW_SQL}, {@value AvailableSettings#FORMAT_SQL},
* and {@value AvailableSettings#HIGHLIGHT_SQL}.
*
* @param showSql should SQL be logged to console?
* @param formatSql should logged SQL be formatted
* @param highlightSql should logged SQL be highlighted with pretty colors
*/
public HibernatePersistenceConfiguration showSql(boolean showSql, boolean formatSql, boolean highlightSql) {
property( JdbcSettings.SHOW_SQL, showSql );
property( JdbcSettings.FORMAT_SQL, formatSql );
property( JdbcSettings.HIGHLIGHT_SQL, highlightSql );
return this;
}
/**
* Specifies whether Hibernate will strictly adhere to compliance with Jakarta Persistence for
* all aspects of {@linkplain jakarta.persistence.Query} handling.
*
* @see JpaComplianceSettings#JPA_QUERY_COMPLIANCE
*/
public HibernatePersistenceConfiguration queryCompliance(boolean enabled) {
property( JpaComplianceSettings.JPA_QUERY_COMPLIANCE, enabled );
return this;
}
/**
* Specifies whether Hibernate will strictly adhere to compliance with Jakarta Persistence for
* all aspects of transaction handling.
*
* @see JpaComplianceSettings#JPA_TRANSACTION_COMPLIANCE
*/
public HibernatePersistenceConfiguration transactionCompliance(boolean enabled) {
property( JpaComplianceSettings.JPA_TRANSACTION_COMPLIANCE, enabled );
return this;
}
/**
* Specifies whether Hibernate will strictly adhere to compliance with Jakarta Persistence for
* handling around calls to {@linkplain EntityManager#close()},
* {@linkplain EntityManager#isOpen()},
* {@linkplain EntityManagerFactory#close()} and
* {@linkplain EntityManagerFactory#isOpen()}
*
* @see JpaComplianceSettings#JPA_CLOSED_COMPLIANCE
*/
public HibernatePersistenceConfiguration closedCompliance(boolean enabled) {
property( JpaComplianceSettings.JPA_CLOSED_COMPLIANCE, enabled );
return this;
}
/**
* Specifies whether Hibernate will strictly adhere to compliance with Jakarta Persistence for
* handling of proxies.
*
* @see JpaComplianceSettings#JPA_PROXY_COMPLIANCE
*/
public HibernatePersistenceConfiguration proxyCompliance(boolean enabled) {
property( JpaComplianceSettings.JPA_PROXY_COMPLIANCE, enabled );
return this;
}
/**
* Specifies whether Hibernate will strictly adhere to compliance with Jakarta Persistence for
* handling of proxies.
*
* @see JpaComplianceSettings#JPA_PROXY_COMPLIANCE
*/
public HibernatePersistenceConfiguration cachingCompliance(boolean enabled) {
property( JpaComplianceSettings.JPA_PROXY_COMPLIANCE, enabled );
return this;
}
/**
* Specifies whether Hibernate will strictly adhere to compliance with Jakarta Persistence for
* in terms of collecting all named value generators globally, regardless of location.
*
* @see JpaComplianceSettings#JPA_ID_GENERATOR_GLOBAL_SCOPE_COMPLIANCE
*/
public HibernatePersistenceConfiguration globalGeneratorCompliance(boolean enabled) {
property( JpaComplianceSettings.JPA_ID_GENERATOR_GLOBAL_SCOPE_COMPLIANCE, enabled );
return this;
}
/**
* Specifies whether Hibernate will strictly adhere to compliance with Jakarta Persistence for
* the interpretation of {@link jakarta.persistence.OrderBy}.
*
* @see JpaComplianceSettings#JPA_ORDER_BY_MAPPING_COMPLIANCE
*/
public HibernatePersistenceConfiguration orderByMappingCompliance(boolean enabled) {
property( JpaComplianceSettings.JPA_ORDER_BY_MAPPING_COMPLIANCE, enabled );
return this;
}
/**
* Specifies whether Hibernate will strictly adhere to compliance with Jakarta Persistence for
* the allowed type of identifier value passed to
* {@link jakarta.persistence.EntityManager#getReference} and
* {@link jakarta.persistence.EntityManager#find}
*
* @see JpaComplianceSettings#JPA_LOAD_BY_ID_COMPLIANCE
*/
public HibernatePersistenceConfiguration loadByIdCompliance(boolean enabled) {
property( JpaComplianceSettings.JPA_LOAD_BY_ID_COMPLIANCE, enabled );
return this;
}
/**
* Enable or disable the second-level and query caches.
*/
public HibernatePersistenceConfiguration caching(CachingType type) {
assert Objects.nonNull( type );
if ( type == CachingType.NONE || type == CachingType.AUTO ) {
property( CacheSettings.USE_SECOND_LEVEL_CACHE, false );
property( CacheSettings.USE_QUERY_CACHE, false );
}
else if ( type == CachingType.BOTH ) {
property( CacheSettings.USE_SECOND_LEVEL_CACHE, true );
property( CacheSettings.USE_QUERY_CACHE, true );
}
else if ( type == CachingType.DATA ) {
property( CacheSettings.USE_SECOND_LEVEL_CACHE, true );
property( CacheSettings.USE_QUERY_CACHE, false );
}
else if ( type == CachingType.QUERY ) {
property( CacheSettings.USE_SECOND_LEVEL_CACHE, false );
property( CacheSettings.USE_QUERY_CACHE, true );
}
return this;
}
/**
* If {@linkplain CachingType#DATA data caching} is enabled, configure the
* type of concurrency access that should be applied if not explicitly specified
* on a cache region.
*
* @see org.hibernate.annotations.Cache#usage
* @see CacheSettings#DEFAULT_CACHE_CONCURRENCY_STRATEGY
*/
public HibernatePersistenceConfiguration cachingAccessType(AccessType type) {
// todo (7.0) : should this enable second-level cache if not?
property( CacheSettings.DEFAULT_CACHE_CONCURRENCY_STRATEGY, type );
return this;
}
/**
* Specify a {@linkplain StatementInspector} to be applied to all Sessions/EntityManagers
*
* @see JdbcSettings#STATEMENT_INSPECTOR
*/
public HibernatePersistenceConfiguration statementInspector(Class<? extends StatementInspector> inspectorImpl) {
property( JdbcSettings.STATEMENT_INSPECTOR, inspectorImpl );
return this;
}
/**
* Specify a {@linkplain StatementInspector} to be applied to all Sessions/EntityManagers
*
* @see JdbcSettings#STATEMENT_INSPECTOR
*/
public HibernatePersistenceConfiguration statementInspector(StatementInspector inspector) {
property( JdbcSettings.STATEMENT_INSPECTOR, inspector );
return this;
}
/**
* Configure a default catalog name to be used for database objects (tables, sequences, etc) which do not
* explicitly specify one.
*
* @see MappingSettings#DEFAULT_CATALOG
*/
public HibernatePersistenceConfiguration defaultCatalog(String catalogName) {
property( MappingSettings.DEFAULT_CATALOG, catalogName );
return this;
}
/**
* Configure a default schema name to be used for database objects (tables, sequences, etc) which do not
* explicitly specify one.
*
* @see MappingSettings#DEFAULT_SCHEMA
*/
public HibernatePersistenceConfiguration defaultSchema(String schemaName) {
property( MappingSettings.DEFAULT_SCHEMA, schemaName );
return this;
}
/**
* Configure a default schema name to be used for database objects (tables, sequences, etc) which do not
* explicitly specify one.
*
* @see MappingSettings#USE_NATIONALIZED_CHARACTER_DATA
*/
public HibernatePersistenceConfiguration nationalizedCharacterData(boolean enabled) {
property( MappingSettings.USE_NATIONALIZED_CHARACTER_DATA, enabled );
return this;
}
/**
* Configures whether Hibernate should process XML mappings ({@code orm.xml} files).
*
* @see MappingSettings#XML_MAPPING_ENABLED
*/
public HibernatePersistenceConfiguration xmlMappings(boolean enabled) {
property( MappingSettings.XML_MAPPING_ENABLED, enabled );
return this;
}
/**
* Configures whether Hibernate should validate (via schema descriptor) XML files.
*
* @see MappingSettings#VALIDATE_XML
*/
public HibernatePersistenceConfiguration xmlValidation(boolean enabled) {
property( MappingSettings.VALIDATE_XML, enabled );
return this;
}
/**
* Configures whether Hibernate should collect {@linkplain org.hibernate.stat.Statistics}.
*
* @see StatisticsSettings#GENERATE_STATISTICS
*/
public HibernatePersistenceConfiguration collectStatistics(boolean enabled) {
property( StatisticsSettings.GENERATE_STATISTICS, enabled );
return this;
}
/**
* Add the specified classes as {@linkplain #managedClasses() managed classes}.
*
* @see #managedClass
*/
public HibernatePersistenceConfiguration managedClasses(Class<?>... managedClasses) {
Collections.addAll( managedClasses(), managedClasses );
return this;
}
/**
* Add the specified classes as {@linkplain #managedClasses() managed classes}.
*
* @see #managedClass
*/
public HibernatePersistenceConfiguration managedClasses(Collection<Class<?>> managedClasses) {
managedClasses().addAll( managedClasses );
return this;
}
/**
* Add the specified resource names as {@linkplain #mappingFiles() mapping files}.
*
* @see #mappingFiles()
*/
public HibernatePersistenceConfiguration mappingFiles(String... names) {
Collections.addAll( mappingFiles(), names );
return this;
}
/**
* Add the specified resource names as {@linkplain #mappingFiles() mapping files}.
*
* @see #mappingFiles()
*/
public HibernatePersistenceConfiguration mappingFiles(Collection<String> names) {
mappingFiles().addAll( names );
return this;
}
/**
* Add the specified URL as a {@linkplain #jarFileUrls() JAR file}.
*
* @see #jarFileUrls()
*
* @since 7.1
*/
public HibernatePersistenceConfiguration jarFileUrl(URL url) {
jarFileUrls.add( url );
return this;
}
/**
* Add the specified URLs as {@linkplain #jarFileUrls() JAR files}.
*
* @see #jarFileUrls()
*
* @since 7.1
*/
public HibernatePersistenceConfiguration jarFileUrls(URL... urls) {
Collections.addAll( jarFileUrls, urls );
return this;
}
/**
* Add the specified URLs as {@linkplain #jarFileUrls() JAR files}.
*
* @see #jarFileUrls()
*
* @since 7.1
*/
public HibernatePersistenceConfiguration jarFileUrls(Collection<URL> urls) {
jarFileUrls.addAll( urls );
return this;
}
/**
* Specify the {@linkplain Action action} to take in terms of automatic
* database schema tooling.
*
* @apiNote This only controls tooling as exported directly to the database. To
* output tooling commands to scripts, use {@linkplain #properties(Map) config properties}
* instead with appropriate {@linkplain SchemaToolingSettings settings}.
*
* @see SchemaToolingSettings#HBM2DDL_AUTO
*/
public HibernatePersistenceConfiguration schemaToolingAction(Action action) {
property( SchemaToolingSettings.HBM2DDL_AUTO, action );
return this;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// covariant overrides
@Override
public HibernatePersistenceConfiguration provider(String providerClassName) {
return (HibernatePersistenceConfiguration) super.provider( providerClassName );
}
@Override
public HibernatePersistenceConfiguration jtaDataSource(String dataSourceJndiName) {
return (HibernatePersistenceConfiguration) super.jtaDataSource( dataSourceJndiName );
}
@Override
public HibernatePersistenceConfiguration nonJtaDataSource(String dataSourceJndiName) {
return (HibernatePersistenceConfiguration) super.nonJtaDataSource( dataSourceJndiName );
}
@Override
public HibernatePersistenceConfiguration managedClass(Class<?> managedClass) {
return (HibernatePersistenceConfiguration) super.managedClass( managedClass );
}
@Override
public HibernatePersistenceConfiguration mappingFile(String name) {
return (HibernatePersistenceConfiguration) super.mappingFile( name );
}
@Override
public HibernatePersistenceConfiguration transactionType(PersistenceUnitTransactionType transactionType) {
return (HibernatePersistenceConfiguration) super.transactionType( transactionType );
}
@Override
public HibernatePersistenceConfiguration sharedCacheMode(SharedCacheMode sharedCacheMode) {
return (HibernatePersistenceConfiguration) super.sharedCacheMode( sharedCacheMode );
}
@Override
public HibernatePersistenceConfiguration validationMode(ValidationMode validationMode) {
return (HibernatePersistenceConfiguration) super.validationMode( validationMode );
}
@Override
public HibernatePersistenceConfiguration property(String name, Object value) {
return (HibernatePersistenceConfiguration) super.property( name, value );
}
@Override
public HibernatePersistenceConfiguration properties(Map<String, ?> properties) {
return (HibernatePersistenceConfiguration) super.properties( properties );
}
/**
* URLs of JAR files.
* When {@linkplain org.hibernate.cfg.PersistenceSettings#SCANNER_DISCOVERY
* entity discovery} is enabled, the JAR files will be scanned for entities.
*
* @see org.hibernate.cfg.PersistenceSettings#SCANNER_DISCOVERY
* @see jakarta.persistence.spi.PersistenceUnitInfo#getJarFileUrls
*
* @since 7.1
*/
public List<URL> jarFileUrls() {
return jarFileUrls;
}
/**
* Root URL of the persistence unit.
* When {@linkplain org.hibernate.cfg.PersistenceSettings#SCANNER_DISCOVERY
* entity discovery} is enabled, this root URL will be scanned for entities.
*
* @see org.hibernate.cfg.PersistenceSettings#SCANNER_DISCOVERY
* @see jakarta.persistence.spi.PersistenceUnitInfo#getPersistenceUnitRootUrl
*
* @since 7.1
*/
public URL rootUrl() {
return rootUrl;
}
}
|
name
|
java
|
mapstruct__mapstruct
|
processor/src/main/java/org/mapstruct/ap/internal/util/NativeTypes.java
|
{
"start": 8121,
"end": 8542
}
|
class ____ implements LiteralAnalyzer {
@Override
public void validate(String s) {
if ( !(s.length() == 3 && s.startsWith( "'" ) && s.endsWith( "'" )) ) {
throw new NumberFormatException( "invalid character literal" );
}
}
@Override
public Class<?> getLiteral() {
return char.class;
}
}
private static
|
CharAnalyzer
|
java
|
apache__camel
|
components/camel-test/camel-test-main-junit5/src/test/java/org/apache/camel/test/main/junit5/annotation/ReplaceBeanFromMethodTest.java
|
{
"start": 1534,
"end": 1641
}
|
class ____ that an existing bean can be replaced with a bean created from a method.
*/
@CamelMainTest
|
ensuring
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/logging/logback/StructuredLogEncoderTests.java
|
{
"start": 1630,
"end": 6207
}
|
class ____ extends AbstractStructuredLoggingTests {
private StructuredLogEncoder encoder;
private Context loggerContext;
private MockEnvironment environment;
@Override
@BeforeEach
void setUp() {
super.setUp();
this.environment = new MockEnvironment();
this.environment.setProperty("logging.structured.json.stacktrace.printer",
SimpleStackTracePrinter.class.getName());
this.loggerContext = new ContextBase();
this.loggerContext.putObject(Environment.class.getName(), this.environment);
this.encoder = new StructuredLogEncoder();
this.encoder.setContext(this.loggerContext);
}
@Override
@AfterEach
void tearDown() {
super.tearDown();
this.encoder.stop();
}
@Test
@SuppressWarnings("unchecked")
void shouldSupportEcsCommonFormat() {
this.encoder.setFormat("ecs");
this.encoder.start();
LoggingEvent event = createEvent(new RuntimeException("Boom!"));
event.setMDCPropertyMap(Collections.emptyMap());
String json = encode(event);
Map<String, Object> deserialized = deserialize(json);
assertThat(deserialized).containsEntry("ecs", Map.of("version", "8.11"));
Map<String, Object> error = (Map<String, Object>) deserialized.get("error");
assertThat(error).isNotNull();
assertThat(error.get("stack_trace")).isEqualTo("stacktrace:RuntimeException");
}
@Test
@SuppressWarnings("unchecked")
void shouldOutputNestedAdditionalEcsJson() {
this.environment.setProperty("logging.structured.json.add.extra.value", "test");
this.encoder.setFormat("ecs");
this.encoder.start();
LoggingEvent event = createEvent();
event.setMDCPropertyMap(Collections.emptyMap());
String json = encode(event);
Map<String, Object> deserialized = deserialize(json);
assertThat(deserialized).containsKey("extra");
assertThat((Map<String, Object>) deserialized.get("extra")).containsEntry("value", "test");
System.out.println(deserialized);
}
@Test
void shouldSupportLogstashCommonFormat() {
this.encoder.setFormat("logstash");
this.encoder.start();
LoggingEvent event = createEvent(new RuntimeException("Boom!"));
event.setMDCPropertyMap(Collections.emptyMap());
String json = encode(event);
Map<String, Object> deserialized = deserialize(json);
assertThat(deserialized).containsKey("@version");
assertThat(deserialized.get("stack_trace")).isEqualTo("stacktrace:RuntimeException");
}
@Test
void shouldSupportGelfCommonFormat() {
this.encoder.setFormat("gelf");
this.encoder.start();
LoggingEvent event = createEvent(new RuntimeException("Boom!"));
event.setMDCPropertyMap(Collections.emptyMap());
String json = encode(event);
Map<String, Object> deserialized = deserialize(json);
assertThat(deserialized).containsKey("version");
assertThat(deserialized.get("_error_stack_trace")).isEqualTo("stacktrace:RuntimeException");
}
@Test
void shouldSupportCustomFormat() {
this.encoder.setFormat(CustomLogbackStructuredLoggingFormatter.class.getName());
this.encoder.start();
LoggingEvent event = createEvent();
event.setMDCPropertyMap(Collections.emptyMap());
String format = encode(event);
assertThat(format).isEqualTo("custom-format");
}
@Test
void shouldInjectCustomFormatConstructorParameters() {
this.environment.setProperty("spring.application.pid", "42");
this.encoder.setFormat(CustomLogbackStructuredLoggingFormatterWithInjection.class.getName());
this.encoder.start();
LoggingEvent event = createEvent();
event.setMDCPropertyMap(Collections.emptyMap());
String format = encode(event);
assertThat(format).isEqualTo("custom-format-with-injection pid=42 hasThrowableProxyConverter=true");
}
@Test
void shouldCheckTypeArgument() {
assertThatIllegalStateException().isThrownBy(() -> {
this.encoder.setFormat(CustomLogbackStructuredLoggingFormatterWrongType.class.getName());
this.encoder.start();
}).withMessageContaining("must be ch.qos.logback.classic.spi.ILoggingEvent but was java.lang.String");
}
@Test
void shouldCheckTypeArgumentWithRawType() {
assertThatIllegalStateException().isThrownBy(() -> {
this.encoder.setFormat(CustomLogbackStructuredLoggingFormatterRawType.class.getName());
this.encoder.start();
}).withMessageContaining("must be ch.qos.logback.classic.spi.ILoggingEvent but was null");
}
@Test
void shouldFailIfNoCommonOrCustomFormatIsSet() {
assertThatIllegalArgumentException().isThrownBy(() -> {
this.encoder.setFormat("does-not-exist");
this.encoder.start();
})
.withMessageContaining("Unknown format 'does-not-exist'. Values can be a valid fully-qualified "
+ "
|
StructuredLogEncoderTests
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/endpoint/AbstractOAuth2AuthorizationGrantRequest.java
|
{
"start": 1329,
"end": 1509
}
|
class ____ {
private final AuthorizationGrantType authorizationGrantType;
private final ClientRegistration clientRegistration;
/**
* Sub-
|
AbstractOAuth2AuthorizationGrantRequest
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/dynamic/support/ResolvableType.java
|
{
"start": 35455,
"end": 35690
}
|
class ____ generics
* @see #forClassWithGenerics(Class, Class...)
*/
public static ResolvableType forClassWithGenerics(Class<?> sourceClass, ResolvableType... generics) {
LettuceAssert.notNull(sourceClass, "Source
|
and
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/job/JobResourceRequirementsHandler.java
|
{
"start": 1873,
"end": 2840
}
|
class ____
extends AbstractRestHandler<
RestfulGateway,
EmptyRequestBody,
JobResourceRequirementsBody,
JobMessageParameters> {
public JobResourceRequirementsHandler(
GatewayRetriever<? extends RestfulGateway> leaderRetriever,
Duration timeout,
Map<String, String> responseHeaders) {
super(leaderRetriever, timeout, responseHeaders, JobResourceRequirementsHeaders.INSTANCE);
}
@Override
protected CompletableFuture<JobResourceRequirementsBody> handleRequest(
@Nonnull HandlerRequest<EmptyRequestBody> request, @Nonnull RestfulGateway gateway)
throws RestHandlerException {
final JobID jobId = request.getPathParameter(JobIDPathParameter.class);
return gateway.requestJobResourceRequirements(jobId)
.thenApply(JobResourceRequirementsBody::new);
}
}
|
JobResourceRequirementsHandler
|
java
|
quarkusio__quarkus
|
extensions/amazon-lambda/deployment/src/test/java/io/quarkus/amazon/lambda/deployment/RequestHandlerJandexUtilTest.java
|
{
"start": 21688,
"end": 21990
}
|
class ____ implements DefaultMethodInterface {
@Override
public Long handleRequest(Double input, Context context) {
return input.longValue() * 2; // Different implementation
}
}
// Concrete parent with inheriting child
public static
|
ConcreteOverridesDefault
|
java
|
apache__flink
|
flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopViewFileSystemTruncateTest.java
|
{
"start": 2194,
"end": 6403
}
|
class ____ {
@TempDir static File tempFolder;
private final FileSystemTestHelper fileSystemTestHelper = new FileSystemTestHelper("/tests");
private static MiniDFSCluster hdfsCluster;
private static FileSystem fHdfs;
private static org.apache.flink.core.fs.FileSystem fSystem;
private Configuration fsViewConf;
private FileSystem fsTarget;
private Path targetTestRoot;
@BeforeAll
static void testHadoopVersion() {
assumeThat(HadoopUtils.isMinHadoopVersion(2, 7)).isTrue();
}
@BeforeAll
static void verifyOS() {
assumeThat(OperatingSystem.isWindows())
.describedAs("HDFS cluster cannot be started on Windows without extensions.")
.isFalse();
}
@BeforeAll
static void createHDFS() throws Exception {
final File baseDir = tempFolder;
final Configuration hdConf = new Configuration();
hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
final MiniDFSCluster.Builder builder =
new MiniDFSCluster.Builder(hdConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1));
hdfsCluster = builder.build();
hdfsCluster.waitClusterUp();
fHdfs = hdfsCluster.getFileSystem(0);
}
@BeforeEach
void setUp() throws Exception {
fsTarget = fHdfs;
targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
fsTarget.delete(targetTestRoot, true);
fsTarget.mkdirs(targetTestRoot);
fsViewConf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
fSystem = new HadoopFileSystem(fsView);
}
private void setupMountPoints() {
Path mountOnNn1 = new Path("/mountOnNn1");
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
}
@AfterAll
static void shutdownCluster() {
hdfsCluster.shutdown();
}
@AfterEach
void tearDown() throws Exception {
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
}
@Test
void testViewFileSystemRecoverWorks() throws IOException {
final org.apache.flink.core.fs.Path testPath =
new org.apache.flink.core.fs.Path(fSystem.getUri() + "mountOnNn1/test-1");
final String expectedContent = "test_line";
final RecoverableWriter writer = fSystem.createRecoverableWriter();
final RecoverableFsDataOutputStream streamUnderTest =
getOpenStreamToFileWithContent(writer, testPath, expectedContent);
final ResumeRecoverable resumeRecover = streamUnderTest.persist();
final RecoverableFsDataOutputStream recover = writer.recover(resumeRecover);
final RecoverableWriter.CommitRecoverable committable =
recover.closeForCommit().getRecoverable();
final RecoverableWriter recoveredWriter = fSystem.createRecoverableWriter();
recoveredWriter.recoverForCommit(committable).commitAfterRecovery();
verifyFileContent(testPath, expectedContent);
}
private RecoverableFsDataOutputStream getOpenStreamToFileWithContent(
final RecoverableWriter writerUnderTest,
final org.apache.flink.core.fs.Path path,
final String expectedContent)
throws IOException {
final byte[] content = expectedContent.getBytes(UTF_8);
final RecoverableFsDataOutputStream streamUnderTest = writerUnderTest.open(path);
streamUnderTest.write(content);
return streamUnderTest;
}
private static void verifyFileContent(
final org.apache.flink.core.fs.Path testPath, final String expectedContent)
throws IOException {
try (FSDataInputStream in = fSystem.open(testPath);
InputStreamReader ir = new InputStreamReader(in, UTF_8);
BufferedReader reader = new BufferedReader(ir)) {
final String line = reader.readLine();
assertThat(line).isEqualTo(expectedContent);
}
}
}
|
HadoopViewFileSystemTruncateTest
|
java
|
apache__rocketmq
|
remoting/src/main/java/org/apache/rocketmq/remoting/protocol/route/MessageQueueRouteState.java
|
{
"start": 863,
"end": 1013
}
|
enum ____ {
// do not change below order, since ordinal() is used
Expired,
ReadOnly,
Normal,
WriteOnly,
;
}
|
MessageQueueRouteState
|
java
|
quarkusio__quarkus
|
extensions/netty/runtime/src/main/java/io/quarkus/netty/runtime/graal/NettySubstitutions.java
|
{
"start": 7869,
"end": 8360
}
|
class ____ {
@Substitute
public SSLEngine wrapSslEngine(SSLEngine engine, ByteBufAllocator alloc,
JdkApplicationProtocolNegotiator applicationNegotiator, boolean isServer) {
return (SSLEngine) (Object) new Target_io_netty_handler_ssl_JdkAlpnSslEngine(engine, applicationNegotiator,
isServer);
}
}
@TargetClass(className = "io.netty.handler.ssl.JdkAlpnSslEngine")
final
|
Target_io_netty_handler_ssl_JdkAlpnApplicationProtocolNegotiator_AlpnWrapper
|
java
|
square__retrofit
|
retrofit/java-test/src/test/java/retrofit2/RequestFactoryTest.java
|
{
"start": 25659,
"end": 26093
}
|
class ____ {
@HEAD("/foo/bar/") //
Call<ResponseBody> method() {
return null;
}
}
try {
buildRequest(Example.class);
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
.isEqualTo(
"HEAD method must use Void or Unit as response type.\n for method Example.method");
}
}
@Test
public void post() {
|
Example
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
|
{
"start": 62374,
"end": 62596
}
|
enum ____ declared in the configuration
* @param <E> enumeration type
* @throws IllegalArgumentException if one of the entries was unknown and ignoreUnknown is false,
* or there are two entries in the
|
values
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/create/OracleCreateSequenceTest_1_bugfix.java
|
{
"start": 937,
"end": 2454
}
|
class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = //
"CREATE SEQUENCE \"YDJC\".\"SEQ_ZW_DZFPKJRZ_RZBS\" START WITH ? INCREMENT BY ? MAXVALUE ? MINVALUE ? CACHE ?;";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
statemen.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(0, visitor.getTables().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("cdc.en_complaint_ipr_stat_fdt0")));
assertEquals(0, visitor.getColumns().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "*")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "YEAR")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "order_mode")));
}
}
|
OracleCreateSequenceTest_1_bugfix
|
java
|
netty__netty
|
transport/src/main/java/io/netty/channel/nio/NioIoHandler.java
|
{
"start": 5051,
"end": 12584
}
|
class ____ {
final Selector unwrappedSelector;
final Selector selector;
SelectorTuple(Selector unwrappedSelector) {
this.unwrappedSelector = unwrappedSelector;
this.selector = unwrappedSelector;
}
SelectorTuple(Selector unwrappedSelector, Selector selector) {
this.unwrappedSelector = unwrappedSelector;
this.selector = selector;
}
}
private SelectorTuple openSelector() {
final Selector unwrappedSelector;
try {
unwrappedSelector = provider.openSelector();
} catch (IOException e) {
throw new ChannelException("failed to open a new selector", e);
}
if (DISABLE_KEY_SET_OPTIMIZATION) {
return new SelectorTuple(unwrappedSelector);
}
Object maybeSelectorImplClass = AccessController.doPrivileged(new PrivilegedAction<Object>() {
@Override
public Object run() {
try {
return Class.forName(
"sun.nio.ch.SelectorImpl",
false,
PlatformDependent.getSystemClassLoader());
} catch (Throwable cause) {
return cause;
}
}
});
if (!(maybeSelectorImplClass instanceof Class) ||
// ensure the current selector implementation is what we can instrument.
!((Class<?>) maybeSelectorImplClass).isAssignableFrom(unwrappedSelector.getClass())) {
if (maybeSelectorImplClass instanceof Throwable) {
Throwable t = (Throwable) maybeSelectorImplClass;
logger.trace("failed to instrument a special java.util.Set into: {}", unwrappedSelector, t);
}
return new SelectorTuple(unwrappedSelector);
}
final Class<?> selectorImplClass = (Class<?>) maybeSelectorImplClass;
final SelectedSelectionKeySet selectedKeySet = new SelectedSelectionKeySet();
Object maybeException = AccessController.doPrivileged(new PrivilegedAction<Object>() {
@Override
public Object run() {
try {
Field selectedKeysField = selectorImplClass.getDeclaredField("selectedKeys");
Field publicSelectedKeysField = selectorImplClass.getDeclaredField("publicSelectedKeys");
if (PlatformDependent.javaVersion() >= 9 && PlatformDependent.hasUnsafe()) {
// Let us try to use sun.misc.Unsafe to replace the SelectionKeySet.
// This allows us to also do this in Java9+ without any extra flags.
long selectedKeysFieldOffset = PlatformDependent.objectFieldOffset(selectedKeysField);
long publicSelectedKeysFieldOffset =
PlatformDependent.objectFieldOffset(publicSelectedKeysField);
if (selectedKeysFieldOffset != -1 && publicSelectedKeysFieldOffset != -1) {
PlatformDependent.putObject(
unwrappedSelector, selectedKeysFieldOffset, selectedKeySet);
PlatformDependent.putObject(
unwrappedSelector, publicSelectedKeysFieldOffset, selectedKeySet);
return null;
}
// We could not retrieve the offset, lets try reflection as last-resort.
}
Throwable cause = ReflectionUtil.trySetAccessible(selectedKeysField, true);
if (cause != null) {
return cause;
}
cause = ReflectionUtil.trySetAccessible(publicSelectedKeysField, true);
if (cause != null) {
return cause;
}
selectedKeysField.set(unwrappedSelector, selectedKeySet);
publicSelectedKeysField.set(unwrappedSelector, selectedKeySet);
return null;
} catch (NoSuchFieldException | IllegalAccessException e) {
return e;
}
}
});
if (maybeException instanceof Exception) {
selectedKeys = null;
Exception e = (Exception) maybeException;
logger.trace("failed to instrument a special java.util.Set into: {}", unwrappedSelector, e);
return new SelectorTuple(unwrappedSelector);
}
selectedKeys = selectedKeySet;
logger.trace("instrumented a special java.util.Set into: {}", unwrappedSelector);
return new SelectorTuple(unwrappedSelector,
new SelectedSelectionKeySetSelector(unwrappedSelector, selectedKeySet));
}
/**
* Returns the {@link SelectorProvider} used by this {@link NioEventLoop} to obtain the {@link Selector}.
*/
public SelectorProvider selectorProvider() {
return provider;
}
Selector selector() {
return selector;
}
int numRegistered() {
return selector().keys().size() - cancelledKeys;
}
Set<SelectionKey> registeredSet() {
return selector().keys();
}
void rebuildSelector0() {
final Selector oldSelector = selector;
final SelectorTuple newSelectorTuple;
if (oldSelector == null) {
return;
}
try {
newSelectorTuple = openSelector();
} catch (Exception e) {
logger.warn("Failed to create a new Selector.", e);
return;
}
// Register all channels to the new Selector.
int nChannels = 0;
for (SelectionKey key : oldSelector.keys()) {
DefaultNioRegistration handle = (DefaultNioRegistration) key.attachment();
try {
if (!key.isValid() || key.channel().keyFor(newSelectorTuple.unwrappedSelector) != null) {
continue;
}
handle.register(newSelectorTuple.unwrappedSelector);
nChannels++;
} catch (Exception e) {
logger.warn("Failed to re-register a NioHandle to the new Selector.", e);
handle.cancel();
}
}
selector = newSelectorTuple.selector;
unwrappedSelector = newSelectorTuple.unwrappedSelector;
try {
// time to close the old selector as everything else is registered to the new one
oldSelector.close();
} catch (Throwable t) {
if (logger.isWarnEnabled()) {
logger.warn("Failed to close the old Selector.", t);
}
}
if (logger.isInfoEnabled()) {
logger.info("Migrated " + nChannels + " channel(s) to the new Selector.");
}
}
private static NioIoHandle nioHandle(IoHandle handle) {
if (handle instanceof NioIoHandle) {
return (NioIoHandle) handle;
}
throw new IllegalArgumentException("IoHandle of type " + StringUtil.simpleClassName(handle) + " not supported");
}
private static NioIoOps cast(IoOps ops) {
if (ops instanceof NioIoOps) {
return (NioIoOps) ops;
}
throw new IllegalArgumentException("IoOps of type " + StringUtil.simpleClassName(ops) + " not supported");
}
final
|
SelectorTuple
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java
|
{
"start": 6765,
"end": 8304
}
|
class ____ extends MasterNodeRequest<Request> implements IndicesRequest.Replaceable {
private String[] indices = Strings.EMPTY_ARRAY;
private final RefCounted refCounted = AbstractRefCounted.of(() -> {});
Request() {
super(TEST_REQUEST_TIMEOUT);
}
Request(StreamInput in) throws IOException {
super(in);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
return new CancellableTask(id, type, action, "", parentTaskId, headers);
}
@Override
public String[] indices() {
return indices;
}
@Override
public IndicesOptions indicesOptions() {
return IndicesOptions.strictExpandOpen();
}
@Override
public IndicesRequest indices(String... indices) {
this.indices = indices;
return this;
}
@Override
public void incRef() {
refCounted.incRef();
}
@Override
public boolean tryIncRef() {
return refCounted.tryIncRef();
}
@Override
public boolean decRef() {
return refCounted.decRef();
}
@Override
public boolean hasReferences() {
return refCounted.hasReferences();
}
}
|
Request
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/dirty/DirtyTrackingCollectionInDefaultFetchGroupTest.java
|
{
"start": 1528,
"end": 2834
}
|
class ____ {
@BeforeEach
public void prepare(SessionFactoryScope scope) {
assertTrue( scope.getSessionFactory().getSessionFactoryOptions().isCollectionsInDefaultFetchGroupEnabled() );
scope.inTransaction( em -> {
StringsEntity entity = new StringsEntity();
entity.id = 1L;
entity.someStrings = new ArrayList<>( Arrays.asList( "a", "b", "c" ) );
em.persist( entity );
} );
}
@Test
public void test(SessionFactoryScope scope) {
scope.inTransaction( entityManager -> {
StringsEntity entity = entityManager.find( StringsEntity.class, 1L );
entityManager.flush();
BytecodeLazyAttributeInterceptor interceptor = (BytecodeLazyAttributeInterceptor) ( (PersistentAttributeInterceptable) entity )
.$$_hibernate_getInterceptor();
// the attributes are initialized with a PersistentCollection that is not initialized
assertFalse( interceptor.hasAnyUninitializedAttributes() );
assertTrue( interceptor.isAttributeLoaded( "someStrings" ) );
assertTrue( interceptor.isAttributeLoaded( "someStringEntities" ) );
assertFalse( Hibernate.isInitialized( entity.someStrings ) );
assertFalse( Hibernate.isInitialized( entity.someStringEntities ) );
} );
}
// --- //
@Entity
@Table( name = "STRINGS_ENTITY" )
static
|
DirtyTrackingCollectionInDefaultFetchGroupTest
|
java
|
elastic__elasticsearch
|
libs/simdvec/src/main/java/org/elasticsearch/simdvec/internal/vectorization/ESVectorUtilSupport.java
|
{
"start": 535,
"end": 2436
}
|
interface ____ {
/**
* The number of bits in bit-quantized query vectors
*/
short B_QUERY = 4;
/**
* Compute dot product between {@code q} and {@code d}
* @param q query vector, {@link #B_QUERY}-bit quantized and striped (see {@code ESVectorUtil.transposeHalfByte})
* @param d data vector, 1-bit quantized
*/
long ipByteBinByte(byte[] q, byte[] d);
int ipByteBit(byte[] q, byte[] d);
float ipFloatBit(float[] q, byte[] d);
float ipFloatByte(float[] q, byte[] d);
float calculateOSQLoss(
float[] target,
float lowerInterval,
float upperInterval,
float step,
float invStep,
float norm2,
float lambda,
int[] quantize
);
void calculateOSQGridPoints(float[] target, int[] quantize, int points, float[] pts);
void centerAndCalculateOSQStatsEuclidean(float[] target, float[] centroid, float[] centered, float[] stats);
void centerAndCalculateOSQStatsDp(float[] target, float[] centroid, float[] centered, float[] stats);
float soarDistance(float[] v1, float[] centroid, float[] originalResidual, float soarLambda, float rnorm);
int quantizeVectorWithIntervals(float[] vector, int[] quantize, float lowInterval, float upperInterval, byte bit);
void squareDistanceBulk(float[] query, float[] v0, float[] v1, float[] v2, float[] v3, float[] distances);
void soarDistanceBulk(
float[] v1,
float[] c0,
float[] c1,
float[] c2,
float[] c3,
float[] originalResidual,
float soarLambda,
float rnorm,
float[] distances
);
void packAsBinary(int[] vector, byte[] packed);
void packDibit(int[] vector, byte[] packed);
void transposeHalfByte(int[] q, byte[] quantQueryByte);
int indexOf(byte[] bytes, int offset, int length, byte marker);
}
|
ESVectorUtilSupport
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/util/StateHandleStoreUtilsTest.java
|
{
"start": 1427,
"end": 3574
}
|
class ____ {
@Test
void testSerializationAndDeserialization() throws Exception {
final TestingLongStateHandleHelper.LongStateHandle original =
new TestingLongStateHandleHelper.LongStateHandle(42L);
byte[] serializedData = StateHandleStoreUtils.serializeOrDiscard(original);
final TestingLongStateHandleHelper.LongStateHandle deserializedInstance =
StateHandleStoreUtils.deserialize(serializedData);
assertThat(deserializedInstance.getStateSize()).isEqualTo(original.getStateSize());
assertThat(deserializedInstance.getValue()).isEqualTo(original.getValue());
}
@Test
void testSerializeOrDiscardFailureHandling() throws Exception {
final AtomicBoolean discardCalled = new AtomicBoolean(false);
final StateObject original =
new FailingSerializationStateObject(() -> discardCalled.set(true));
assertThatThrownBy(() -> StateHandleStoreUtils.serializeOrDiscard(original))
.withFailMessage("An IOException is expected to be thrown.")
.isInstanceOf(IOException.class);
assertThat(discardCalled).isTrue();
}
@Test
void testSerializationOrDiscardWithDiscardFailure() throws Exception {
final Exception discardException =
new IllegalStateException(
"Expected IllegalStateException that should be suppressed.");
final StateObject original =
new FailingSerializationStateObject(
() -> {
throw discardException;
});
assertThatThrownBy(() -> StateHandleStoreUtils.serializeOrDiscard(original))
.withFailMessage("An IOException is expected to be thrown.")
.isInstanceOf(IOException.class)
.satisfies(
e -> {
assertThat(e.getSuppressed()).hasSize(1);
assertThat(e.getSuppressed()[0]).isEqualTo(discardException);
});
}
private static
|
StateHandleStoreUtilsTest
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/stat/StatAnnotationTest.java
|
{
"start": 657,
"end": 1316
}
|
class ____ extends TestCase {
@Autowired
private UserService userService;
@Test
public void test_0() throws InterruptedException {
userService.save();
List<Map<String, Object>> result = SpringStatManager.getInstance().getMethodStatData();
Assert.assertNotNull(result);
Assert.assertEquals(1, result.size());
Map<String, Object> statItem = result.get(0);
Assert.assertEquals("com.alibaba.druid.stat.spring.UserService", statItem.get("Class"));
Assert.assertEquals("save()", statItem.get("Method"));
Assert.assertEquals(1L, statItem.get("ExecuteCount"));
}
}
|
StatAnnotationTest
|
java
|
quarkusio__quarkus
|
extensions/hibernate-validator/deployment/src/test/java/io/quarkus/hibernate/validator/test/config/ConfigMappingInvalidTest.java
|
{
"start": 4200,
"end": 4620
}
|
interface ____ {
String host();
@Min(8000)
int port();
@WithConverter(DurationConverter.class)
Duration timeout();
@WithName("io-threads")
int threads();
@WithParentName
Map<String, Form> form();
Optional<Ssl> ssl();
Optional<Proxy> proxy();
Optional<Cors> cors();
Log log();
Info info();
|
Cloud
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/injection/constructornoinject/DisposesParamConstructorTest.java
|
{
"start": 1042,
"end": 1141
}
|
class ____ {
@Inject
public MyBean(@Disposes String ignored) {
}
}
}
|
MyBean
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryBreakInSwitchTest.java
|
{
"start": 3752,
"end": 4268
}
|
class ____ {
void f(int i) {
switch (i) {
default -> {
if (true) {
// BUG: Diagnostic contains: break is unnecessary
break;
} else {
// BUG: Diagnostic contains: break is unnecessary
break;
}
}
}
;
}
}
""")
.doTest();
}
}
|
Test
|
java
|
quarkusio__quarkus
|
extensions/mailer/deployment/src/test/java/io/quarkus/mailer/NamedMailersTemplatesInjectionTest.java
|
{
"start": 5337,
"end": 5783
}
|
class ____ {
@Inject
MailTemplate test1;
@Location("mails/test2")
MailTemplate testMail;
Uni<Void> send1() {
return test1.to("quarkus-send1@quarkus.io").subject("Test").data("name", "John").send();
}
Uni<Void> send2() {
return testMail.to("quarkus-send2@quarkus.io").subject("Test").data("name", "Lu").send();
}
}
@Singleton
static
|
MailTemplates
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/mappedBy/MappedByNonAssociationTest.java
|
{
"start": 916,
"end": 1784
}
|
class ____ {
@Test void test(SessionFactoryScope scope) {
Extensions ex = new Extensions();
ex.exExtensionDays = 3L;
ex.exNo = 1L;
ex.exLoanId = 4L;
Loan loan = new Loan();
loan.id = 4L;
loan.extensions.add(ex);
scope.inTransaction(s -> s.persist(loan));
Loan l1 = scope.fromTransaction(s -> {
Loan ll = s.find(Loan.class, loan.id);
Hibernate.initialize(ll.extensions);
return ll;
});
assertEquals( 1, l1.extensions.size() );
assertEquals( loan.id, l1.id );
assertEquals( ex.exLoanId, l1.extensions.get(0).exLoanId );
Loan l2 = scope.fromSession(s -> s.createQuery("from Loan join fetch extensions", Loan.class).getSingleResult());
assertEquals( 1, l2.extensions.size() );
assertEquals( loan.id, l2.id );
assertEquals( ex.exLoanId, l2.extensions.get(0).exLoanId );
}
@Entity(name="Loan")
static
|
MappedByNonAssociationTest
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/WebProperties.java
|
{
"start": 4194,
"end": 6147
}
|
class ____ {
boolean customized;
/**
* Whether to enable the Spring Resource Handling chain. By default, disabled
* unless at least one strategy has been enabled.
*/
private @Nullable Boolean enabled;
/**
* Whether to enable caching in the Resource chain.
*/
private boolean cache = true;
/**
* Whether to enable resolution of already compressed resources (gzip,
* brotli). Checks for a resource name with the '.gz' or '.br' file
* extensions.
*/
private boolean compressed;
private final Strategy strategy = new Strategy();
/**
* Return whether the resource chain is enabled. Return {@code null} if no
* specific settings are present.
* @return whether the resource chain is enabled or {@code null} if no
* specified settings are present.
*/
public @Nullable Boolean getEnabled() {
return getEnabled(getStrategy().getFixed().isEnabled(), getStrategy().getContent().isEnabled(),
this.enabled);
}
private boolean hasBeenCustomized() {
return this.customized || getStrategy().hasBeenCustomized();
}
public void setEnabled(Boolean enabled) {
this.enabled = enabled;
this.customized = true;
}
public boolean isCache() {
return this.cache;
}
public void setCache(boolean cache) {
this.cache = cache;
this.customized = true;
}
public Strategy getStrategy() {
return this.strategy;
}
public boolean isCompressed() {
return this.compressed;
}
public void setCompressed(boolean compressed) {
this.compressed = compressed;
this.customized = true;
}
static @Nullable Boolean getEnabled(boolean fixedEnabled, boolean contentEnabled,
@Nullable Boolean chainEnabled) {
return (fixedEnabled || contentEnabled) ? Boolean.TRUE : chainEnabled;
}
/**
* Strategies for extracting and embedding a resource version in its URL path.
*/
public static
|
Chain
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/api/AbstractBigDecimalScaleAssert.java
|
{
"start": 719,
"end": 1550
}
|
class ____<SELF extends AbstractBigDecimalAssert<SELF>>
extends AbstractIntegerAssert<AbstractBigDecimalScaleAssert<SELF>> {
protected AbstractBigDecimalScaleAssert(Integer actualScale, Class<?> selfType) {
super(actualScale, selfType);
}
/**
* Returns to the BigDecimal on which we ran scale assertions on.
* <p>
* Example:
* <pre><code class='java'> assertThat(new BigDecimal("2.313")).scale()
* .isGreaterThan(1L)
* .isLessThan(5L)
* .returnToBigDecimal()
* .isPositive();</code></pre>
*
* @return BigDecimal assertions.
*/
public abstract AbstractBigDecimalAssert<SELF> returnToBigDecimal();
}
|
AbstractBigDecimalScaleAssert
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
|
{
"start": 4078,
"end": 4301
}
|
class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestContainerResizing.class);
private final int GB = 1024;
private YarnConfiguration conf;
RMNodeLabelsManager mgr;
|
TestContainerResizing
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/eventbus/StringCatcher.java
|
{
"start": 1080,
"end": 1462
}
|
class ____ {
private final List<String> events = new ArrayList<>();
@Subscribe
public void hereHaveAString(@Nullable String string) {
events.add(string);
}
public void methodWithoutAnnotation(@Nullable String string) {
Assert.fail("Event bus must not call methods without @Subscribe!");
}
public List<String> getEvents() {
return events;
}
}
|
StringCatcher
|
java
|
google__guava
|
android/guava-tests/benchmark/com/google/common/util/concurrent/MonitorBasedPriorityBlockingQueue.java
|
{
"start": 17936,
"end": 18114
}
|
class ____ method
@Override
public Iterator<E> iterator() {
return new Itr(toArray());
}
/** Snapshot iterator that works off copy of underlying q array. */
private
|
to
|
java
|
apache__spark
|
common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/BlockPushingListener.java
|
{
"start": 963,
"end": 1133
}
|
interface ____
* {@link BlockFetchingListener} are unified under {@link BlockTransferListener} to allow
* code reuse for handling block push and fetch retry.
*/
public
|
and
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/maybe/MaybeOnErrorNext.java
|
{
"start": 1709,
"end": 3524
}
|
class ____<T>
extends AtomicReference<Disposable>
implements MaybeObserver<T>, Disposable {
private static final long serialVersionUID = 2026620218879969836L;
final MaybeObserver<? super T> downstream;
final Function<? super Throwable, ? extends MaybeSource<? extends T>> resumeFunction;
OnErrorNextMaybeObserver(MaybeObserver<? super T> actual,
Function<? super Throwable, ? extends MaybeSource<? extends T>> resumeFunction) {
this.downstream = actual;
this.resumeFunction = resumeFunction;
}
@Override
public void dispose() {
DisposableHelper.dispose(this);
}
@Override
public boolean isDisposed() {
return DisposableHelper.isDisposed(get());
}
@Override
public void onSubscribe(Disposable d) {
if (DisposableHelper.setOnce(this, d)) {
downstream.onSubscribe(this);
}
}
@Override
public void onSuccess(T value) {
downstream.onSuccess(value);
}
@Override
public void onError(Throwable e) {
MaybeSource<? extends T> m;
try {
m = Objects.requireNonNull(resumeFunction.apply(e), "The resumeFunction returned a null MaybeSource");
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
downstream.onError(new CompositeException(e, ex));
return;
}
DisposableHelper.replace(this, null);
m.subscribe(new NextMaybeObserver<T>(downstream, this));
}
@Override
public void onComplete() {
downstream.onComplete();
}
static final
|
OnErrorNextMaybeObserver
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/main/java/io/quarkus/arc/deployment/ArcConfig.java
|
{
"start": 5307,
"end": 6157
}
|
class ____ as defined by {@link Class#getSimpleName()}, i.e. {@code Foo}</li>
* <li>a package name with suffix {@code .*}, i.e. {@code org.acme.*}, matches a package</li>
* <li>a package name with suffix {@code .**}, i.e. {@code org.acme.**}, matches a package that starts with the value</li>
* </ul>
* If any element value matches a discovered type then the type is excluded from discovery, i.e. no beans and observer
* methods are created from this type.
*/
Optional<List<String>> excludeTypes();
/**
* List of types that should be considered unremovable regardless of whether they are directly used or not.
* This is a configuration option equivalent to using {@link io.quarkus.arc.Unremovable} annotation.
*
* <p>
* An element value can be:
* <ul>
* <li>a fully qualified
|
name
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/JooqEndpointBuilderFactory.java
|
{
"start": 22133,
"end": 29493
}
|
interface ____
extends
EndpointConsumerBuilder {
default JooqEndpointConsumerBuilder basic() {
return (JooqEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedJooqEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedJooqEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedJooqEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedJooqEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedJooqEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedJooqEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing
* you to provide your custom implementation to control error handling
* usually occurred during the poll operation before an Exchange have
* been created and being routed in Camel.
*
* The option is a:
* <code>org.apache.camel.spi.PollingConsumerPollStrategy</code> type.
*
* Group: consumer (advanced)
*
* @param pollStrategy the value to set
* @return the dsl builder
*/
default AdvancedJooqEndpointConsumerBuilder pollStrategy(org.apache.camel.spi.PollingConsumerPollStrategy pollStrategy) {
doSetProperty("pollStrategy", pollStrategy);
return this;
}
/**
* A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing
* you to provide your custom implementation to control error handling
* usually occurred during the poll operation before an Exchange have
* been created and being routed in Camel.
*
* The option will be converted to a
* <code>org.apache.camel.spi.PollingConsumerPollStrategy</code> type.
*
* Group: consumer (advanced)
*
* @param pollStrategy the value to set
* @return the dsl builder
*/
default AdvancedJooqEndpointConsumerBuilder pollStrategy(String pollStrategy) {
doSetProperty("pollStrategy", pollStrategy);
return this;
}
}
/**
* Builder for endpoint producers for the JOOQ component.
*/
public
|
AdvancedJooqEndpointConsumerBuilder
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/main/java/org/springframework/web/reactive/function/server/ServerResponse.java
|
{
"start": 2301,
"end": 7360
}
|
interface ____ {
/**
* Return the status code of this response.
* @return the status as an HttpStatusCode value
*/
HttpStatusCode statusCode();
/**
* Return the headers of this response.
*/
HttpHeaders headers();
/**
* Return the cookies of this response.
*/
MultiValueMap<String, ResponseCookie> cookies();
/**
* Write this response to the given web exchange.
* @param exchange the web exchange to write to
* @param context the context to use when writing
* @return {@code Mono<Void>} to indicate when writing is complete
*/
Mono<Void> writeTo(ServerWebExchange exchange, Context context);
// Static methods
/**
* Create a builder with the status code and headers of the given response.
* @param other the response to copy the status and headers from
* @return the created builder
*/
static BodyBuilder from(ServerResponse other) {
return new DefaultServerResponseBuilder(other);
}
/**
* Create a {@code ServerResponse} from the given {@link ErrorResponse}.
* @param response the {@link ErrorResponse} to initialize from
* @return {@code Mono} with the built response
* @since 6.0
*/
static Mono<ServerResponse> from(ErrorResponse response) {
return status(response.getStatusCode())
.headers(headers -> headers.putAll(response.getHeaders()))
.bodyValue(response.getBody());
}
/**
* Create a builder with the given HTTP status.
* @param status the response status
* @return the created builder
*/
static BodyBuilder status(HttpStatusCode status) {
return new DefaultServerResponseBuilder(status);
}
/**
* Create a builder with the given HTTP status.
* @param status the response status
* @return the created builder
* @since 5.0.3
*/
static BodyBuilder status(int status) {
return new DefaultServerResponseBuilder(HttpStatusCode.valueOf(status));
}
/**
* Create a builder with the status set to {@linkplain HttpStatus#OK 200 OK}.
* @return the created builder
*/
static BodyBuilder ok() {
return status(HttpStatus.OK);
}
/**
* Create a new builder with a {@linkplain HttpStatus#CREATED 201 Created} status
* and a location header set to the given URI.
* @param location the location URI
* @return the created builder
*/
static BodyBuilder created(URI location) {
BodyBuilder builder = status(HttpStatus.CREATED);
return builder.location(location);
}
/**
* Create a builder with an {@linkplain HttpStatus#ACCEPTED 202 Accepted} status.
* @return the created builder
*/
static BodyBuilder accepted() {
return status(HttpStatus.ACCEPTED);
}
/**
* Create a builder with a {@linkplain HttpStatus#NO_CONTENT 204 No Content} status.
* @return the created builder
*/
static HeadersBuilder<?> noContent() {
return status(HttpStatus.NO_CONTENT);
}
/**
* Create a builder with a {@linkplain HttpStatus#SEE_OTHER 303 See Other}
* status and a location header set to the given URI.
* @param location the location URI
* @return the created builder
*/
static BodyBuilder seeOther(URI location) {
BodyBuilder builder = status(HttpStatus.SEE_OTHER);
return builder.location(location);
}
/**
* Create a builder with a {@linkplain HttpStatus#TEMPORARY_REDIRECT 307 Temporary Redirect}
* status and a location header set to the given URI.
* @param location the location URI
* @return the created builder
*/
static BodyBuilder temporaryRedirect(URI location) {
BodyBuilder builder = status(HttpStatus.TEMPORARY_REDIRECT);
return builder.location(location);
}
/**
* Create a builder with a {@linkplain HttpStatus#PERMANENT_REDIRECT 308 Permanent Redirect}
* status and a location header set to the given URI.
* @param location the location URI
* @return the created builder
*/
static BodyBuilder permanentRedirect(URI location) {
BodyBuilder builder = status(HttpStatus.PERMANENT_REDIRECT);
return builder.location(location);
}
/**
* Create a builder with a {@linkplain HttpStatus#BAD_REQUEST 400 Bad Request} status.
* @return the created builder
*/
static BodyBuilder badRequest() {
return status(HttpStatus.BAD_REQUEST);
}
/**
* Create a builder with a {@linkplain HttpStatus#NOT_FOUND 404 Not Found} status.
* @return the created builder
*/
static HeadersBuilder<?> notFound() {
return status(HttpStatus.NOT_FOUND);
}
/**
* Create a builder with an
* {@linkplain HttpStatus#UNPROCESSABLE_CONTENT 422 Unprocessable Content} status.
* @return the created builder
*/
static BodyBuilder unprocessableContent() {
return status(HttpStatus.UNPROCESSABLE_CONTENT);
}
/**
* Create a builder with an
* {@linkplain HttpStatus#UNPROCESSABLE_ENTITY 422 Unprocessable Entity} status.
* @return the created builder
* @deprecated since 7.0 in favor of {@link #unprocessableContent()}
*/
@Deprecated(since = "7.0")
static BodyBuilder unprocessableEntity() {
return status(HttpStatus.UNPROCESSABLE_ENTITY);
}
/**
* Defines a builder that adds headers to the response.
* @param <B> the builder subclass
*/
|
ServerResponse
|
java
|
google__guava
|
guava-tests/test/com/google/common/collect/FluentIterableTest.java
|
{
"start": 16217,
"end": 31782
}
|
class ____
implements Function<Integer, List<? extends String>> {
@Override
public List<String> apply(Integer from) {
String value = String.valueOf(from);
return ImmutableList.of(value, value);
}
}
public void testTransformAndConcat_wildcardFunctionGenerics() {
List<Integer> input = asList(1, 2, 3);
FluentIterable<String> unused =
FluentIterable.from(input).transformAndConcat(new RepeatedStringValueOfWildcardFunction());
}
public void testFirst_list() {
List<String> list = Lists.newArrayList("a", "b", "c");
assertThat(FluentIterable.from(list).first()).hasValue("a");
}
public void testFirst_null() {
List<String> list = Lists.newArrayList(null, "a", "b");
assertThrows(NullPointerException.class, () -> FluentIterable.from(list).first());
}
public void testFirst_emptyList() {
List<String> list = emptyList();
assertThat(FluentIterable.from(list).first()).isAbsent();
}
public void testFirst_sortedSet() {
SortedSet<String> sortedSet = ImmutableSortedSet.of("b", "c", "a");
assertThat(FluentIterable.from(sortedSet).first()).hasValue("a");
}
public void testFirst_emptySortedSet() {
SortedSet<String> sortedSet = ImmutableSortedSet.of();
assertThat(FluentIterable.from(sortedSet).first()).isAbsent();
}
public void testFirst_iterable() {
Set<String> set = ImmutableSet.of("a", "b", "c");
assertThat(FluentIterable.from(set).first()).hasValue("a");
}
public void testFirst_emptyIterable() {
Set<String> set = new HashSet<>();
assertThat(FluentIterable.from(set).first()).isAbsent();
}
public void testLast_list() {
List<String> list = Lists.newArrayList("a", "b", "c");
assertThat(FluentIterable.from(list).last()).hasValue("c");
}
public void testLast_null() {
List<String> list = Lists.newArrayList("a", "b", null);
assertThrows(NullPointerException.class, () -> FluentIterable.from(list).last());
}
public void testLast_emptyList() {
List<String> list = emptyList();
assertThat(FluentIterable.from(list).last()).isAbsent();
}
public void testLast_sortedSet() {
SortedSet<String> sortedSet = ImmutableSortedSet.of("b", "c", "a");
assertThat(FluentIterable.from(sortedSet).last()).hasValue("c");
}
public void testLast_emptySortedSet() {
SortedSet<String> sortedSet = ImmutableSortedSet.of();
assertThat(FluentIterable.from(sortedSet).last()).isAbsent();
}
public void testLast_iterable() {
Set<String> set = ImmutableSet.of("a", "b", "c");
assertThat(FluentIterable.from(set).last()).hasValue("c");
}
public void testLast_emptyIterable() {
Set<String> set = new HashSet<>();
assertThat(FluentIterable.from(set).last()).isAbsent();
}
public void testSkip_simple() {
Collection<String> set = ImmutableSet.of("a", "b", "c", "d", "e");
assertEquals(
Lists.newArrayList("c", "d", "e"), Lists.newArrayList(FluentIterable.from(set).skip(2)));
assertEquals("[c, d, e]", FluentIterable.from(set).skip(2).toString());
}
public void testSkip_simpleList() {
Collection<String> list = Lists.newArrayList("a", "b", "c", "d", "e");
assertEquals(
Lists.newArrayList("c", "d", "e"), Lists.newArrayList(FluentIterable.from(list).skip(2)));
assertEquals("[c, d, e]", FluentIterable.from(list).skip(2).toString());
}
public void testSkip_pastEnd() {
Collection<String> set = ImmutableSet.of("a", "b");
assertEquals(emptyList(), Lists.newArrayList(FluentIterable.from(set).skip(20)));
}
public void testSkip_pastEndList() {
Collection<String> list = Lists.newArrayList("a", "b");
assertEquals(emptyList(), Lists.newArrayList(FluentIterable.from(list).skip(20)));
}
public void testSkip_skipNone() {
Collection<String> set = ImmutableSet.of("a", "b");
assertEquals(
Lists.newArrayList("a", "b"), Lists.newArrayList(FluentIterable.from(set).skip(0)));
}
public void testSkip_skipNoneList() {
Collection<String> list = Lists.newArrayList("a", "b");
assertEquals(
Lists.newArrayList("a", "b"), Lists.newArrayList(FluentIterable.from(list).skip(0)));
}
public void testSkip_iterator() throws Exception {
new IteratorTester<Integer>(
5,
IteratorFeature.MODIFIABLE,
Lists.newArrayList(2, 3),
IteratorTester.KnownOrder.KNOWN_ORDER) {
@Override
protected Iterator<Integer> newTargetIterator() {
Collection<Integer> collection = new LinkedHashSet<>();
Collections.addAll(collection, 1, 2, 3);
return FluentIterable.from(collection).skip(1).iterator();
}
}.test();
}
public void testSkip_iteratorList() throws Exception {
new IteratorTester<Integer>(
5,
IteratorFeature.MODIFIABLE,
Lists.newArrayList(2, 3),
IteratorTester.KnownOrder.KNOWN_ORDER) {
@Override
protected Iterator<Integer> newTargetIterator() {
return FluentIterable.from(Lists.newArrayList(1, 2, 3)).skip(1).iterator();
}
}.test();
}
public void testSkip_nonStructurallyModifiedList() throws Exception {
List<String> list = Lists.newArrayList("a", "b", "c");
FluentIterable<String> tail = FluentIterable.from(list).skip(1);
Iterator<String> tailIterator = tail.iterator();
list.set(2, "c2");
assertEquals("b", tailIterator.next());
assertEquals("c2", tailIterator.next());
assertFalse(tailIterator.hasNext());
}
public void testSkip_structurallyModifiedSkipSome() throws Exception {
Collection<String> set = new LinkedHashSet<>();
Collections.addAll(set, "a", "b", "c");
FluentIterable<String> tail = FluentIterable.from(set).skip(1);
set.remove("b");
set.addAll(Lists.newArrayList("X", "Y", "Z"));
assertThat(tail).containsExactly("c", "X", "Y", "Z").inOrder();
}
public void testSkip_structurallyModifiedSkipSomeList() throws Exception {
List<String> list = Lists.newArrayList("a", "b", "c");
FluentIterable<String> tail = FluentIterable.from(list).skip(1);
list.subList(1, 3).clear();
list.addAll(0, Lists.newArrayList("X", "Y", "Z"));
assertThat(tail).containsExactly("Y", "Z", "a").inOrder();
}
public void testSkip_structurallyModifiedSkipAll() throws Exception {
Collection<String> set = new LinkedHashSet<>();
Collections.addAll(set, "a", "b", "c");
FluentIterable<String> tail = FluentIterable.from(set).skip(2);
set.remove("a");
set.remove("b");
assertFalse(tail.iterator().hasNext());
}
public void testSkip_structurallyModifiedSkipAllList() throws Exception {
List<String> list = Lists.newArrayList("a", "b", "c");
FluentIterable<String> tail = FluentIterable.from(list).skip(2);
list.subList(0, 2).clear();
assertThat(tail).isEmpty();
}
public void testSkip_illegalArgument() {
assertThrows(
IllegalArgumentException.class, () -> FluentIterable.from(asList("a", "b", "c")).skip(-1));
}
public void testLimit() {
Iterable<String> iterable = Lists.newArrayList("foo", "bar", "baz");
FluentIterable<String> limited = FluentIterable.from(iterable).limit(2);
assertEquals(ImmutableList.of("foo", "bar"), Lists.newArrayList(limited));
assertCanIterateAgain(limited);
assertEquals("[foo, bar]", limited.toString());
}
public void testLimit_illegalArgument() {
assertThrows(
IllegalArgumentException.class,
() -> {
FluentIterable<String> unused =
FluentIterable.from(Lists.newArrayList("a", "b", "c")).limit(-1);
});
}
public void testIsEmpty() {
assertTrue(FluentIterable.<String>from(Collections.<String>emptyList()).isEmpty());
assertFalse(FluentIterable.<String>from(Lists.newArrayList("foo")).isEmpty());
}
public void testToList() {
assertEquals(Lists.newArrayList(1, 2, 3, 4), fluent(1, 2, 3, 4).toList());
}
public void testToList_empty() {
assertTrue(fluent().toList().isEmpty());
}
public void testToSortedList_withComparator() {
assertEquals(
Lists.newArrayList(4, 3, 2, 1),
fluent(4, 1, 3, 2).toSortedList(Ordering.<Integer>natural().reverse()));
}
public void testToSortedList_withDuplicates() {
assertEquals(
Lists.newArrayList(4, 3, 1, 1),
fluent(1, 4, 1, 3).toSortedList(Ordering.<Integer>natural().reverse()));
}
public void testToSet() {
assertThat(fluent(1, 2, 3, 4).toSet()).containsExactly(1, 2, 3, 4).inOrder();
}
public void testToSet_removeDuplicates() {
assertThat(fluent(1, 2, 1, 2).toSet()).containsExactly(1, 2).inOrder();
}
public void testToSet_empty() {
assertTrue(fluent().toSet().isEmpty());
}
public void testToSortedSet() {
assertThat(fluent(1, 4, 2, 3).toSortedSet(Ordering.<Integer>natural().reverse()))
.containsExactly(4, 3, 2, 1)
.inOrder();
}
public void testToSortedSet_removeDuplicates() {
assertThat(fluent(1, 4, 1, 3).toSortedSet(Ordering.<Integer>natural().reverse()))
.containsExactly(4, 3, 1)
.inOrder();
}
public void testToMultiset() {
assertThat(fluent(1, 2, 1, 3, 2, 4).toMultiset()).containsExactly(1, 1, 2, 2, 3, 4).inOrder();
}
public void testToMultiset_empty() {
assertThat(fluent().toMultiset()).isEmpty();
}
public void testToMap() {
assertThat(fluent(1, 2, 3).toMap(Functions.toStringFunction()).entrySet())
.containsExactly(immutableEntry(1, "1"), immutableEntry(2, "2"), immutableEntry(3, "3"))
.inOrder();
}
public void testToMap_nullKey() {
assertThrows(
NullPointerException.class, () -> fluent(1, null, 2).toMap(Functions.constant("foo")));
}
public void testToMap_nullValue() {
assertThrows(NullPointerException.class, () -> fluent(1, 2, 3).toMap(Functions.constant(null)));
}
public void testIndex() {
ImmutableListMultimap<Integer, String> expected =
ImmutableListMultimap.<Integer, String>builder()
.putAll(3, "one", "two")
.put(5, "three")
.put(4, "four")
.build();
ImmutableListMultimap<Integer, String> index =
FluentIterable.from(asList("one", "two", "three", "four"))
.index(
new Function<String, Integer>() {
@Override
public Integer apply(String input) {
return input.length();
}
});
assertEquals(expected, index);
}
public void testIndex_nullKey() {
assertThrows(
NullPointerException.class,
() -> {
ImmutableListMultimap<Object, Integer> unused =
fluent(1, 2, 3).index(Functions.constant(null));
});
}
public void testIndex_nullValue() {
assertThrows(
NullPointerException.class,
() -> {
ImmutableListMultimap<String, Integer> unused =
fluent(1, null, 2).index(Functions.constant("foo"));
});
}
public void testUniqueIndex() {
ImmutableMap<Integer, String> expected = ImmutableMap.of(3, "two", 5, "three", 4, "four");
ImmutableMap<Integer, String> index =
FluentIterable.from(asList("two", "three", "four"))
.uniqueIndex(
new Function<String, Integer>() {
@Override
public Integer apply(String input) {
return input.length();
}
});
assertEquals(expected, index);
}
public void testUniqueIndex_duplicateKey() {
assertThrows(
IllegalArgumentException.class,
() -> {
ImmutableMap<Integer, String> unused =
FluentIterable.from(asList("one", "two", "three", "four"))
.uniqueIndex(
new Function<String, Integer>() {
@Override
public Integer apply(String input) {
return input.length();
}
});
});
}
public void testUniqueIndex_nullKey() {
assertThrows(
NullPointerException.class, () -> fluent(1, 2, 3).uniqueIndex(Functions.constant(null)));
}
public void testUniqueIndex_nullValue() {
assertThrows(
NullPointerException.class,
() -> {
ImmutableMap<Object, Integer> unused =
fluent(1, null, 2)
.uniqueIndex(
new Function<Integer, Object>() {
@Override
public Object apply(@Nullable Integer input) {
return String.valueOf(input);
}
});
});
}
public void testCopyInto_list() {
assertThat(fluent(1, 3, 5).copyInto(Lists.newArrayList(1, 2)))
.containsExactly(1, 2, 1, 3, 5)
.inOrder();
}
public void testCopyInto_set() {
assertThat(fluent(1, 3, 5).copyInto(newHashSet(1, 2))).containsExactly(1, 2, 3, 5);
}
public void testCopyInto_setAllDuplicates() {
assertThat(fluent(1, 3, 5).copyInto(newHashSet(1, 2, 3, 5))).containsExactly(1, 2, 3, 5);
}
public void testCopyInto_nonCollection() {
ArrayList<Integer> list = Lists.newArrayList(1, 2, 3);
ArrayList<Integer> iterList = Lists.newArrayList(9, 8, 7);
Iterable<Integer> iterable =
new Iterable<Integer>() {
@Override
public Iterator<Integer> iterator() {
return iterList.iterator();
}
};
assertThat(FluentIterable.from(iterable).copyInto(list))
.containsExactly(1, 2, 3, 9, 8, 7)
.inOrder();
}
public void testJoin() {
assertEquals("2,1,3,4", fluent(2, 1, 3, 4).join(Joiner.on(",")));
}
public void testJoin_empty() {
assertEquals("", fluent().join(Joiner.on(",")));
}
public void testGet() {
assertEquals("a", FluentIterable.from(Lists.newArrayList("a", "b", "c")).get(0));
assertEquals("b", FluentIterable.from(Lists.newArrayList("a", "b", "c")).get(1));
assertEquals("c", FluentIterable.from(Lists.newArrayList("a", "b", "c")).get(2));
}
public void testGet_outOfBounds() {
assertThrows(
IndexOutOfBoundsException.class,
() -> FluentIterable.from(Lists.newArrayList("a", "b", "c")).get(-1));
assertThrows(
IndexOutOfBoundsException.class,
() -> FluentIterable.from(Lists.newArrayList("a", "b", "c")).get(3));
}
/*
* Full and proper black-box testing of a Stream-returning method is extremely involved, and is
* overkill when nearly all Streams are produced using well-tested JDK calls. So, we cheat and
* just test that the toArray() contents are as expected.
*/
public void testStream() {
assertThat(FluentIterable.of().stream()).isEmpty();
assertThat(FluentIterable.of("a").stream()).containsExactly("a");
assertThat(FluentIterable.of(1, 2, 3).stream().filter(n -> n > 1)).containsExactly(2, 3);
}
private static void assertCanIterateAgain(Iterable<?> iterable) {
for (Object unused : iterable) {
// do nothing
}
}
private static FluentIterable<Integer> fluent(Integer... elements) {
return FluentIterable.from(Lists.newArrayList(elements));
}
private static Iterable<String> iterable(String... elements) {
List<String> list = asList(elements);
return new Iterable<String>() {
@Override
public Iterator<String> iterator() {
return list.iterator();
}
};
}
}
|
RepeatedStringValueOfWildcardFunction
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
|
{
"start": 20890,
"end": 22479
}
|
class ____ {
private final long offset;
private final RecordMetadata metadata;
private final ProduceRequestResult result;
private final Callback callback;
private final TopicPartition tp;
public Completion(long offset,
RecordMetadata metadata,
ProduceRequestResult result,
Callback callback,
TopicPartition tp) {
this.metadata = metadata;
this.offset = offset;
this.result = result;
this.callback = callback;
this.tp = tp;
}
public void complete(RuntimeException e) {
if (e == null) {
result.set(offset, RecordBatch.NO_TIMESTAMP, null);
} else {
result.set(-1, RecordBatch.NO_TIMESTAMP, index -> e);
}
if (callback != null) {
if (e == null)
callback.onCompletion(metadata, null);
else
callback.onCompletion(new RecordMetadata(tp, -1, -1, RecordBatch.NO_TIMESTAMP, -1, -1), e);
}
result.done();
}
}
public List<KafkaMetric> addedMetrics() {
return Collections.unmodifiableList(addedMetrics);
}
@Override
public void registerMetricForSubscription(KafkaMetric metric) {
addedMetrics.add(metric);
}
@Override
public void unregisterMetricFromSubscription(KafkaMetric metric) {
addedMetrics.remove(metric);
}
}
|
Completion
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/jpa/event/spi/CallbackRegistry.java
|
{
"start": 242,
"end": 1075
}
|
interface ____ {
/**
* Do we have any registered callbacks of the given type for the given entity?
*
* @param entityClass The entity Class to check against
* @param callbackType The type of callback to look for
*
* @return {@code true} indicates there are already registered callbacks of
* that type for that class; {@code false} indicates there are not.
*/
boolean hasRegisteredCallbacks(Class<?> entityClass, CallbackType callbackType);
void preCreate(Object entity);
void postCreate(Object entity);
boolean preUpdate(Object entity);
void postUpdate(Object entity);
void preRemove(Object entity);
void postRemove(Object entity);
boolean postLoad(Object entity);
/**
* Signals that the CallbackRegistry will no longer be used.
* In particular, it is important to release references to
|
CallbackRegistry
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/path/JSONPath_deepScan_test2.java
|
{
"start": 1055,
"end": 1248
}
|
class ____ {
public int id;
public Department() {
}
public Department(int id) {
this.id = id;
}
}
}
|
Department
|
java
|
spring-projects__spring-boot
|
core/spring-boot-test/src/main/java/org/springframework/boot/test/context/ImportsContextCustomizer.java
|
{
"start": 5330,
"end": 5816
}
|
class
____ other = (ImportsContextCustomizer) obj;
return this.key.equals(other.key);
}
@Override
public int hashCode() {
return this.key.hashCode();
}
@Override
public String toString() {
return new ToStringCreator(this).append("key", this.key).toString();
}
/**
* {@link Configuration @Configuration} registered to trigger the
* {@link ImportsSelector}.
*/
@Configuration(proxyBeanMethods = false)
@Import(ImportsSelector.class)
static
|
ImportsContextCustomizer
|
java
|
apache__flink
|
flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/windowing/delta/DeltaFunction.java
|
{
"start": 1305,
"end": 1651
}
|
interface ____<DATA> extends Serializable {
/**
* Calculates the delta between two given data points.
*
* @param oldDataPoint the old data point.
* @param newDataPoint the new data point.
* @return the delta between the two given points.
*/
double getDelta(DATA oldDataPoint, DATA newDataPoint);
}
|
DeltaFunction
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderFactory.java
|
{
"start": 1271,
"end": 1388
}
|
class ____ {
static final AbstractClientProvider PROVIDER = new TarballClientProvider();
}
private static
|
Client
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/utils/ChunkedBytesStream.java
|
{
"start": 1157,
"end": 1781
}
|
class ____ be configured to not push skip() to
* input stream. We may want to avoid pushing this to input stream because it's implementation maybe inefficient,
* e.g. the case of ZstdInputStream which allocates a new buffer from buffer pool, per skip call.
* - Unlike {@link java.io.BufferedInputStream}, which allocates an intermediate buffer, this uses a buffer supplier to
* create the intermediate buffer.
* - Unlike {@link java.io.BufferedInputStream}, this implementation does not support {@link InputStream#mark(int)} and
* {@link InputStream#markSupported()} will return false.
* <p>
* Note that:
* - this
|
could
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.