language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeErrorTests.java | {
"start": 831,
"end": 1980
} | class ____ extends ErrorsForCasesWithoutExamplesTestCase {
@Override
protected List<TestCaseSupplier> cases() {
return paramsToSuppliers(WildcardLikeTests.parameters());
}
@Override
protected Stream<List<DataType>> testCandidates(List<TestCaseSupplier> cases, Set<List<DataType>> valid) {
/*
* We can't support certain signatures, and it's safe not to test them because
* you can't even build them.... The building comes directly from the parser
* and can only make certain types.
*/
return super.testCandidates(cases, valid).filter(sig -> sig.get(1) == DataType.KEYWORD)
.filter(sig -> sig.size() > 2 && sig.get(2) == DataType.BOOLEAN);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return RLikeTests.buildRLike(logger, source, args);
}
@Override
protected Matcher<String> expectedTypeErrorMatcher(List<Set<DataType>> validPerPosition, List<DataType> signature) {
return equalTo(typeErrorMessage(false, validPerPosition, signature, (v, p) -> "string"));
}
}
| WildcardLikeErrorTests |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/struct/FormatFeatureAcceptSingleTest.java | {
"start": 3435,
"end": 3615
} | class ____ {
public final List<Role> roles;
RolesInListWithBuilder(List<Role> roles) {
this.roles = roles;
}
static | RolesInListWithBuilder |
java | apache__camel | components/camel-test/camel-test-spring-junit5/src/main/java/org/apache/camel/test/spring/junit5/ShutdownTimeout.java | {
"start": 1579,
"end": 1870
} | interface ____ {
/**
* The shutdown timeout to set on the {@code CamelContext}(s). Defaults to {@code 10} seconds.
*/
int value() default 10;
/**
* The time unit that {@link #value()} is in.
*/
TimeUnit timeUnit() default TimeUnit.SECONDS;
}
| ShutdownTimeout |
java | hibernate__hibernate-orm | hibernate-graalvm/src/test/java/org/hibernate/graalvm/internal/StaticClassListsTest.java | {
"start": 1380,
"end": 2124
} | class ____ {
@ParameterizedTest
@EnumSource(TypesNeedingRuntimeInitialization_Category.class)
void containsAllExpectedClasses(TypesNeedingRuntimeInitialization_Category category) {
assertThat( StaticClassLists.typesNeedingRuntimeInitialization() )
.containsAll( category.classes().collect( Collectors.toSet() ) );
}
@Test
void meta_noMissingTestCategory() {
assertThat( Arrays.stream( TypesNeedingRuntimeInitialization_Category.values() ).flatMap( TypesNeedingRuntimeInitialization_Category::classes ) )
.as( "If this fails, a category is missing in " + TypesNeedingRuntimeInitialization_Category.class )
.contains( StaticClassLists.typesNeedingRuntimeInitialization() );
}
}
| TypesNeedingRuntimeInitialization |
java | apache__flink | flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/embedded/EmbeddedPythonWindowOperator.java | {
"start": 2547,
"end": 6251
} | class ____<K, IN, OUT, W extends Window>
extends AbstractOneInputEmbeddedPythonFunctionOperator<IN, OUT>
implements Triggerable<K, W> {
private static final long serialVersionUID = 1L;
/** For serializing the window in checkpoints. */
private final TypeSerializer<W> windowSerializer;
/** The TypeInformation of the key. */
private transient TypeInformation<K> keyTypeInfo;
private transient PythonTypeUtils.DataConverter<K, Object> keyConverter;
private transient WindowContextImpl windowContext;
private transient WindowTimerContextImpl windowTimerContext;
public EmbeddedPythonWindowOperator(
Configuration config,
DataStreamPythonFunctionInfo pythonFunctionInfo,
TypeInformation<IN> inputTypeInfo,
TypeInformation<OUT> outputTypeInfo,
TypeSerializer<W> windowSerializer) {
super(config, pythonFunctionInfo, inputTypeInfo, outputTypeInfo);
this.windowSerializer = checkNotNull(windowSerializer);
}
@Override
public void open() throws Exception {
keyTypeInfo = ((RowTypeInfo) this.getInputTypeInfo()).getTypeAt(0);
keyConverter = PythonTypeUtils.TypeInfoToDataConverter.typeInfoDataConverter(keyTypeInfo);
InternalTimerService<W> internalTimerService =
getInternalTimerService("window-timers", windowSerializer, this);
windowContext = new WindowContextImpl(internalTimerService);
windowTimerContext = new WindowTimerContextImpl(internalTimerService);
super.open();
}
@Override
public List<FlinkFnApi.UserDefinedDataStreamFunction> createUserDefinedFunctionsProto() {
return ProtoUtils.createUserDefinedDataStreamStatefulFunctionProtos(
getPythonFunctionInfo(),
getRuntimeContext(),
getJobParameters(),
keyTypeInfo,
inBatchExecutionMode(getKeyedStateBackend()),
config.get(PYTHON_METRIC_ENABLED),
config.get(PYTHON_PROFILE_ENABLED),
hasSideOutput,
config.get(STATE_CACHE_SIZE),
config.get(MAP_STATE_READ_CACHE_SIZE),
config.get(MAP_STATE_WRITE_CACHE_SIZE));
}
@Override
public void onEventTime(InternalTimer<K, W> timer) throws Exception {
collector.setAbsoluteTimestamp(timer.getTimestamp());
invokeUserFunction(timer);
}
@Override
public void onProcessingTime(InternalTimer<K, W> timer) throws Exception {
collector.eraseTimestamp();
invokeUserFunction(timer);
}
@Override
public Object getFunctionContext() {
return windowContext;
}
@Override
public Object getTimerContext() {
return windowTimerContext;
}
@Override
public <T> DataStreamPythonFunctionOperator<T> copy(
DataStreamPythonFunctionInfo pythonFunctionInfo, TypeInformation<T> outputTypeInfo) {
return new EmbeddedPythonWindowOperator<>(
config, pythonFunctionInfo, getInputTypeInfo(), outputTypeInfo, windowSerializer);
}
private void invokeUserFunction(InternalTimer<K, W> timer) throws Exception {
windowTimerContext.timer = timer;
PyIterator results =
(PyIterator)
interpreter.invokeMethod("operation", "on_timer", timer.getTimestamp());
while (results.hasNext()) {
OUT result = outputDataConverter.toInternal(results.next());
collector.collect(result);
}
results.close();
windowTimerContext.timer = null;
}
private | EmbeddedPythonWindowOperator |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java | {
"start": 770,
"end": 2551
} | class ____ extends PostOptimizationPhasePlanVerifier<LogicalPlan> {
public static final LogicalVerifier LOCAL_INSTANCE = new LogicalVerifier(true);
public static final LogicalVerifier INSTANCE = new LogicalVerifier(false);
private LogicalVerifier(boolean isLocal) {
super(isLocal);
}
@Override
public void checkPlanConsistency(LogicalPlan optimizedPlan, Failures failures, Failures depFailures) {
List<BiConsumer<LogicalPlan, Failures>> checkers = new ArrayList<>();
optimizedPlan.forEachUp(p -> {
PlanConsistencyChecker.checkPlan(p, depFailures);
if (failures.hasFailures() == false) {
if (p instanceof PostOptimizationVerificationAware pova
&& (pova instanceof PostOptimizationVerificationAware.CoordinatorOnly && isLocal) == false) {
pova.postOptimizationVerification(failures);
}
if (p instanceof PostOptimizationPlanVerificationAware popva) {
checkers.add(popva.postOptimizationPlanVerification());
}
p.forEachExpression(ex -> {
if (ex instanceof PostOptimizationVerificationAware va
&& (va instanceof PostOptimizationVerificationAware.CoordinatorOnly && isLocal) == false) {
va.postOptimizationVerification(failures);
}
if (ex instanceof PostOptimizationPlanVerificationAware vpa) {
vpa.postOptimizationPlanVerification().accept(p, failures);
}
});
}
});
optimizedPlan.forEachUp(p -> checkers.forEach(checker -> checker.accept(p, failures)));
}
}
| LogicalVerifier |
java | apache__flink | flink-core/src/main/java/org/apache/flink/util/CloseableIterator.java | {
"start": 5130,
"end": 5984
} | class ____<E> implements CloseableIterator<E> {
@Nonnull private final Iterator<E> delegate;
private final AutoCloseable close;
IteratorAdapter(@Nonnull Iterator<E> delegate, AutoCloseable close) {
this.delegate = delegate;
this.close = close;
}
@Override
public boolean hasNext() {
return delegate.hasNext();
}
@Override
public E next() {
return delegate.next();
}
@Override
public void remove() {
delegate.remove();
}
@Override
public void forEachRemaining(Consumer<? super E> action) {
delegate.forEachRemaining(action);
}
@Override
public void close() throws Exception {
close.close();
}
}
}
| IteratorAdapter |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/ContextHierarchy.java | {
"start": 6270,
"end": 6448
} | class ____ {}
*
* @ContextHierarchy(
* @ContextConfiguration(name = "child", locations = "/test-user-config.xml", inheritLocations = false)
* )
* public | BaseTests |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/RestrictedApiChecker.java | {
"start": 3191,
"end": 3530
} | class ____ extends BugChecker
implements MethodInvocationTreeMatcher,
NewClassTreeMatcher,
AnnotationTreeMatcher,
MemberReferenceTreeMatcher {
/**
* Validates a {@code @RestrictedApi} annotation and that the declared restriction makes sense.
*
* <p>The other match functions in this | RestrictedApiChecker |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/model/NamedGraphCreator.java | {
"start": 344,
"end": 562
} | interface ____ {
<T> RootGraphImplementor<T> createEntityGraph(
Function<Class<T>, EntityDomainType<?>> entityDomainClassResolver,
Function<String, EntityDomainType<?>> entityDomainNameResolver);
}
| NamedGraphCreator |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/Tokenization.java | {
"start": 1148,
"end": 1228
} | class ____ implements NamedXContentObject, NamedWriteable {
public | Tokenization |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Field.java | {
"start": 6786,
"end": 6947
} | interface ____ extends MessageElement.XField {
ParseField VALUE = new ParseField("value");
ParseField SHORT = new ParseField("short");
}
}
| XField |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/processor/internals/RackAwarenessStreamsPartitionAssignorTest.java | {
"start": 26364,
"end": 30104
} | class ____ {
private final TaskId taskId;
private final Map<String, String> activeClientTags;
private final List<Map<String, String>> standbysClientTags;
ClientTagDistribution(final TaskId taskId) {
this.taskId = taskId;
this.activeClientTags = new HashMap<>();
this.standbysClientTags = new ArrayList<>();
}
void addActiveTags(final Map<String, String> activeClientTags) {
if (!this.activeClientTags.isEmpty()) {
throw new IllegalStateException("Found multiple active tasks for " + taskId + ", this should not happen");
}
this.activeClientTags.putAll(activeClientTags);
}
void addStandbyTags(final Map<String, String> standbyClientTags) {
this.standbysClientTags.add(standbyClientTags);
}
@Override
public String toString() {
return "ClientTagDistribution{" +
"taskId=" + taskId +
", activeClientTags=" + activeClientTags +
", standbysClientTags=" + standbysClientTags +
'}';
}
}
/**
* Helper for building the input to createMockAdminClient in cases where we don't care about the actual offsets
* @param changelogTopics The names of all changelog topics in the topology
* @param topicsNumPartitions The number of partitions for the corresponding changelog topic, such that the number
* of partitions of the ith topic in changelogTopics is given by the ith element of topicsNumPartitions
*/
private static Map<TopicPartition, Long> getTopicPartitionOffsetsMap(final List<String> changelogTopics,
final List<Integer> topicsNumPartitions) {
if (changelogTopics.size() != topicsNumPartitions.size()) {
throw new IllegalStateException("Passed in " + changelogTopics.size() + " changelog topic names, but " +
topicsNumPartitions.size() + " different numPartitions for the topics");
}
final Map<TopicPartition, Long> changelogEndOffsets = new HashMap<>();
for (int i = 0; i < changelogTopics.size(); ++i) {
final String topic = changelogTopics.get(i);
final int numPartitions = topicsNumPartitions.get(i);
for (int partition = 0; partition < numPartitions; ++partition) {
changelogEndOffsets.put(new TopicPartition(topic, partition), Long.MAX_VALUE);
}
}
return changelogEndOffsets;
}
private static ConsumerPartitionAssignor.Subscription getSubscription(final ProcessId processId,
final Collection<TaskId> prevActiveTasks,
final Map<String, String> clientTags) {
return new ConsumerPartitionAssignor.Subscription(
singletonList("source1"),
new SubscriptionInfo(LATEST_SUPPORTED_VERSION, LATEST_SUPPORTED_VERSION, processId, null,
getTaskOffsetSums(prevActiveTasks), (byte) 0, 0, clientTags).encode()
);
}
// Stub offset sums for when we only care about the prev/standby task sets, not the actual offsets
private static Map<TaskId, Long> getTaskOffsetSums(final Collection<TaskId> activeTasks) {
final Map<TaskId, Long> taskOffsetSums = activeTasks.stream().collect(Collectors.toMap(t -> t, t -> Task.LATEST_OFFSET));
taskOffsetSums.putAll(EMPTY_TASKS.stream().collect(Collectors.toMap(t -> t, t -> 0L)));
return taskOffsetSums;
}
}
| ClientTagDistribution |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/cache/ClassLevelDirtiesContextTestNGTests.java | {
"start": 9415,
"end": 9633
} | class ____ extends
ClassLevelDirtiesContextWithCleanMethodsAndDefaultModeTestCase {
}
@DirtiesContext(classMode = ClassMode.AFTER_CLASS)
static | InheritedClassLevelDirtiesContextWithCleanMethodsAndDefaultModeTestCase |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/jdk/JDKDateDeserializers.java | {
"start": 185,
"end": 1437
} | class ____
{
private final static HashSet<String> _utilClasses = new HashSet<String>();
static {
_utilClasses.add("java.util.Calendar");
_utilClasses.add("java.util.GregorianCalendar");
_utilClasses.add("java.util.Date");
}
public static ValueDeserializer<?> find(Class<?> rawType, String clsName)
{
if (_utilClasses.contains(clsName)) {
// Start with the most common type
if (rawType == java.util.Calendar.class) {
return new JavaUtilCalendarDeserializer();
}
if (rawType == java.util.Date.class) {
return JavaUtilDateDeserializer.instance;
}
if (rawType == java.util.GregorianCalendar.class) {
return new JavaUtilCalendarDeserializer(GregorianCalendar.class);
}
}
return null;
}
public static boolean hasDeserializerFor(Class<?> rawType) {
return _utilClasses.contains(rawType.getName());
}
/*
/**********************************************************************
/* Deserializer implementations for Date types
/**********************************************************************
*/
}
| JDKDateDeserializers |
java | netty__netty | codec-http2/src/main/java/io/netty/handler/codec/http2/StreamBufferingEncoder.java | {
"start": 15015,
"end": 15696
} | class ____ extends Frame {
final ByteBuf data;
final int padding;
final boolean endOfStream;
DataFrame(ByteBuf data, int padding, boolean endOfStream, ChannelPromise promise) {
super(promise);
this.data = data;
this.padding = padding;
this.endOfStream = endOfStream;
}
@Override
void release(Throwable t) {
super.release(t);
ReferenceCountUtil.safeRelease(data);
}
@Override
void send(ChannelHandlerContext ctx, int streamId) {
writeData(ctx, streamId, data, padding, endOfStream, promise);
}
}
}
| DataFrame |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/testcontainers/OracleTestContainer.java | {
"start": 1036,
"end": 2129
} | class ____ {
private static final String DB_NAME = "mybatis_test";
private static final String USERNAME = "u";
private static final String PASSWORD = "p";
private static final String DRIVER = "oracle.jdbc.driver.OracleDriver";
@Container
private static final OracleContainer INSTANCE = initContainer();
private static OracleContainer initContainer() {
@SuppressWarnings("resource")
var container = new OracleContainer("gvenzl/oracle-free:slim-faststart").withDatabaseName(DB_NAME)
.withUsername(USERNAME).withPassword(PASSWORD);
container.start();
return container;
}
public static DataSource getUnpooledDataSource() {
return new UnpooledDataSource(OracleTestContainer.DRIVER, INSTANCE.getJdbcUrl(), OracleTestContainer.USERNAME,
OracleTestContainer.PASSWORD);
}
public static PooledDataSource getPooledDataSource() {
return new PooledDataSource(OracleTestContainer.DRIVER, INSTANCE.getJdbcUrl(), OracleTestContainer.USERNAME,
OracleTestContainer.PASSWORD);
}
private OracleTestContainer() {
}
}
| OracleTestContainer |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/client/observation/ClientRequestObservationContext.java | {
"start": 1279,
"end": 2256
} | class ____ extends RequestReplySenderContext<ClientHttpRequest, ClientHttpResponse> {
private @Nullable String uriTemplate;
/**
* Create an observation context for {@link ClientHttpRequest} observations.
* @param request the HTTP client request
*/
public ClientRequestObservationContext(ClientHttpRequest request) {
super(ClientRequestObservationContext::setRequestHeader);
setCarrier(request);
}
private static void setRequestHeader(@Nullable ClientHttpRequest request, String name, String value) {
if (request != null) {
request.getHeaders().set(name, value);
}
}
/**
* Set the URI template used for the current client exchange.
*/
public void setUriTemplate(@Nullable String uriTemplate) {
this.uriTemplate = uriTemplate;
}
/**
* Return the URI template used for the current client exchange, {@code null} if none was used.
*/
public @Nullable String getUriTemplate() {
return this.uriTemplate;
}
}
| ClientRequestObservationContext |
java | spring-projects__spring-framework | spring-messaging/src/main/java/org/springframework/messaging/rsocket/service/RSocketRequestValues.java | {
"start": 1419,
"end": 3861
} | class ____ {
private final @Nullable String route;
private final Object[] routeVariables;
private final Map<Object, MimeType> metadata;
private final @Nullable Object payloadValue;
private final @Nullable Publisher<?> payload;
private final @Nullable ParameterizedTypeReference<?> payloadElementType;
public RSocketRequestValues(
@Nullable String route, @Nullable List<Object> routeVariables, @Nullable MetadataHelper metadataHelper,
@Nullable Object payloadValue, @Nullable Publisher<?> payload,
@Nullable ParameterizedTypeReference<?> payloadElementType) {
this.route = route;
this.routeVariables = (routeVariables != null ? routeVariables.toArray() : new Object[0]);
this.metadata = (metadataHelper != null ? metadataHelper.toMap() : Collections.emptyMap());
this.payloadValue = payloadValue;
this.payload = payload;
this.payloadElementType = payloadElementType;
}
/**
* Return the route value for
* {@link org.springframework.messaging.rsocket.RSocketRequester#route(String, Object...) route}.
*/
public @Nullable String getRoute() {
return this.route;
}
/**
* Return the route variables for
* {@link org.springframework.messaging.rsocket.RSocketRequester#route(String, Object...) route}.
*/
public Object[] getRouteVariables() {
return this.routeVariables;
}
/**
* Return the metadata entries for
* {@link org.springframework.messaging.rsocket.RSocketRequester.RequestSpec#metadata(Object, MimeType)}.
*/
public Map<Object, MimeType> getMetadata() {
return this.metadata;
}
/**
* Return the request payload as a value to be serialized, if set.
* <p>This is mutually exclusive with {@link #getPayload()}.
* Only one of the two or neither is set.
*/
public @Nullable Object getPayloadValue() {
return this.payloadValue;
}
/**
* Return the request payload as a Publisher.
* <p>This is mutually exclusive with {@link #getPayloadValue()}.
* Only one of the two or neither is set.
*/
public @Nullable Publisher<?> getPayload() {
return this.payload;
}
/**
* Return the element type for a {@linkplain #getPayload() Publisher payload}.
*/
public @Nullable ParameterizedTypeReference<?> getPayloadElementType() {
return this.payloadElementType;
}
public static Builder builder(@Nullable String route) {
return new Builder(route);
}
/**
* Builder for {@link RSocketRequestValues}.
*/
public static final | RSocketRequestValues |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/validation/DataBinderTests.java | {
"start": 88053,
"end": 88308
} | class ____ {
private List<Integer> integerList;
public List<Integer> getIntegerList() {
return integerList;
}
public void setIntegerList(List<Integer> integerList) {
this.integerList = integerList;
}
}
private static | BeanWithIntegerList |
java | netty__netty | transport-native-kqueue/src/test/java/io/netty/channel/kqueue/KQueueSocketStartTlsTest.java | {
"start": 903,
"end": 1156
} | class ____ extends SocketStartTlsTest {
@Override
protected List<TestsuitePermutation.BootstrapComboFactory<ServerBootstrap, Bootstrap>> newFactories() {
return KQueueSocketTestPermutation.INSTANCE.socket();
}
}
| KQueueSocketStartTlsTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java | {
"start": 10138,
"end": 11462
} | class ____ {
private final HammingDistanceInterface function;
@SuppressWarnings("unchecked")
public Hamming(ScoreScript scoreScript, Object queryVector, String fieldName) {
DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName);
if (field.getElementType() == DenseVectorFieldMapper.ElementType.FLOAT || field.getElementType() == ElementType.BFLOAT16) {
throw new IllegalArgumentException("hamming distance is only supported for byte or bit vectors");
}
if (queryVector instanceof List) {
function = new ByteHammingDistance(scoreScript, field, (List<Number>) queryVector);
} else if (queryVector instanceof String s) {
byte[] parsedQueryVector = HexFormat.of().parseHex(s);
function = new ByteHammingDistance(scoreScript, field, parsedQueryVector);
} else {
throw new IllegalArgumentException("Unsupported input object for byte vectors: " + queryVector.getClass().getName());
}
}
public double hamming() {
return function.hamming();
}
}
// Calculate l2 norm (Manhattan distance) between a query's dense vector and documents' dense vectors
public | Hamming |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/DebeziumDb2ComponentBuilderFactory.java | {
"start": 1881,
"end": 5169
} | interface ____ extends ComponentBuilder<DebeziumDb2Component> {
/**
* Additional properties for debezium components in case they can't be
* set directly on the camel configurations (e.g: setting Kafka Connect
* properties needed by Debezium engine, for example setting
* KafkaOffsetBackingStore), the properties have to be prefixed with
* additionalProperties.. E.g:
* additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http://localhost:8811/avro. This is a multi-value option with prefix: additionalProperties.
*
* The option is a: <code>java.util.Map&lt;java.lang.String,
* java.lang.Object&gt;</code> type.
*
* Group: common
*
* @param additionalProperties the value to set
* @return the dsl builder
*/
default DebeziumDb2ComponentBuilder additionalProperties(java.util.Map<java.lang.String, java.lang.Object> additionalProperties) {
doSetProperty("additionalProperties", additionalProperties);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default DebeziumDb2ComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allow pre-configured Configurations to be set.
*
* The option is a:
* <code>org.apache.camel.component.debezium.db2.configuration.Db2ConnectorEmbeddedDebeziumConfiguration</code> type.
*
* Group: consumer
*
* @param configuration the value to set
* @return the dsl builder
*/
default DebeziumDb2ComponentBuilder configuration(org.apache.camel.component.debezium.db2.configuration.Db2ConnectorEmbeddedDebeziumConfiguration configuration) {
doSetProperty("configuration", configuration);
return this;
}
/**
* The Converter | DebeziumDb2ComponentBuilder |
java | alibaba__nacos | istio/src/main/java/com/alibaba/nacos/istio/common/WatchedStatus.java | {
"start": 744,
"end": 2252
} | class ____ {
private String type;
private boolean lastAckOrNack;
private Set<String> lastSubscribe;
private String latestVersion;
private String latestNonce;
private String ackedVersion;
private String ackedNonce;
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public String getLatestVersion() {
return latestVersion;
}
public void setLatestVersion(String latestVersion) {
this.latestVersion = latestVersion;
}
public String getLatestNonce() {
return latestNonce;
}
public void setLatestNonce(String latestNonce) {
this.latestNonce = latestNonce;
}
public String getAckedVersion() {
return ackedVersion;
}
public void setAckedVersion(String ackedVersion) {
this.ackedVersion = ackedVersion;
}
public String getAckedNonce() {
return ackedNonce;
}
public void setAckedNonce(String ackedNonce) {
this.ackedNonce = ackedNonce;
}
public boolean isLastAckOrNack() {
return lastAckOrNack;
}
public void setLastAckOrNack(boolean lastAckOrNack) {
this.lastAckOrNack = lastAckOrNack;
}
public Set<String> getLastSubscribe() {
return lastSubscribe;
}
public void setLastSubscribe(Set<String> lastSubscribe) {
this.lastSubscribe = new HashSet<>(lastSubscribe);
}
}
| WatchedStatus |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/nestedproperties/simple/SimpleMapper.java | {
"start": 545,
"end": 1598
} | interface ____ {
SimpleMapper MAPPER = Mappers.getMapper( SimpleMapper.class );
@Mappings( { @Mapping( target = "longValue", source = "props.longValue" ),
@Mapping( target = "publicLongValue", source = "props.publicLongValue" ),
@Mapping( target = "intValue", source = "props.intValue" ),
@Mapping( target = "doubleValue", source = "props.doubleValue" ),
@Mapping( target = "floatValue", source = "props.floatValue" ),
@Mapping( target = "shortValue", source = "props.shortValue" ),
@Mapping( target = "charValue", source = "props.charValue" ),
@Mapping( target = "byteValue", source = "props.byteValue" ),
@Mapping( target = "booleanValue", source = "props.booleanValue" ),
@Mapping( target = "byteArray", source = "props.byteArray" ),
@Mapping( target = "stringValue", source = "props.stringValue" ) } )
TargetObject toTargetObject(SourceRoot sourceRoot);
@InheritInverseConfiguration
SourceRoot toSourceRoot(TargetObject targetObject);
}
| SimpleMapper |
java | quarkusio__quarkus | extensions/oidc-common/runtime/src/main/java/io/quarkus/oidc/common/runtime/config/OidcClientCommonConfig.java | {
"start": 10217,
"end": 11083
} | interface ____ {
/**
* The CredentialsProvider bean name, which should only be set if more than one CredentialsProvider is
* registered
*/
Optional<String> name();
/**
* The CredentialsProvider keyring name.
* The keyring name is only required when the CredentialsProvider being
* used requires the keyring name to look up the secret, which is often the case when a CredentialsProvider is
* shared by multiple extensions to retrieve credentials from a more dynamic source like a vault instance or secret
* manager
*/
Optional<String> keyringName();
/**
* The CredentialsProvider client secret key
*/
Optional<String> key();
}
}
}
| Provider |
java | greenrobot__greendao | DaoGenerator/src/org/greenrobot/greendao/generator/ToManyBase.java | {
"start": 852,
"end": 971
} | class ____ to-many relationship from source entities to target entities. */
@SuppressWarnings("unused")
public abstract | for |
java | spring-projects__spring-boot | module/spring-boot-webflux-test/src/test/java/org/springframework/boot/webflux/test/autoconfigure/WebFluxTypeExcludeFilterTests.java | {
"start": 7243,
"end": 7440
} | class ____ implements WebFilter {
@Override
public Mono<Void> filter(ServerWebExchange serverWebExchange, WebFilterChain webFilterChain) {
return Mono.empty();
}
}
static | ExampleWebFilter |
java | spring-projects__spring-boot | module/spring-boot-rsocket/src/main/java/org/springframework/boot/rsocket/netty/NettyRSocketServer.java | {
"start": 1361,
"end": 2976
} | class ____ implements RSocketServer {
private static final Log logger = LogFactory.getLog(NettyRSocketServer.class);
private final Mono<CloseableChannel> starter;
private final @Nullable Duration lifecycleTimeout;
private @Nullable CloseableChannel channel;
public NettyRSocketServer(Mono<CloseableChannel> starter, @Nullable Duration lifecycleTimeout) {
Assert.notNull(starter, "'starter' must not be null");
this.starter = starter;
this.lifecycleTimeout = lifecycleTimeout;
}
@Override
public @Nullable InetSocketAddress address() {
if (this.channel != null) {
return this.channel.address();
}
return null;
}
@Override
public void start() throws RSocketServerException {
this.channel = block(this.starter, this.lifecycleTimeout);
InetSocketAddress address = address();
Assert.state(address != null, "'address' must not be null");
logger.info("Netty RSocket started on port " + address.getPort());
startDaemonAwaitThread(this.channel);
}
private void startDaemonAwaitThread(@Nullable CloseableChannel channel) {
if (channel == null) {
return;
}
Thread awaitThread = new Thread(() -> channel.onClose().block(), "rsocket");
awaitThread.setContextClassLoader(getClass().getClassLoader());
awaitThread.setDaemon(false);
awaitThread.start();
}
@Override
public void stop() throws RSocketServerException {
if (this.channel != null) {
this.channel.dispose();
this.channel = null;
}
}
private <T> @Nullable T block(Mono<T> mono, @Nullable Duration timeout) {
return (timeout != null) ? mono.block(timeout) : mono.block();
}
}
| NettyRSocketServer |
java | google__guava | android/guava-tests/test/com/google/common/collect/ConcurrentHashMultisetBasherTest.java | {
"start": 3942,
"end": 6029
} | class ____ implements Callable<int[]> {
private final ConcurrentHashMultiset<String> multiset;
private final ImmutableList<String> keys;
private final Random random = new Random();
private MutateTask(ConcurrentHashMultiset<String> multiset, ImmutableList<String> keys) {
this.multiset = multiset;
this.keys = keys;
}
@Override
public int[] call() throws Exception {
int iterations = 100000;
int nKeys = keys.size();
int[] deltas = new int[nKeys];
Operation[] operations = Operation.values();
for (int i = 0; i < iterations; i++) {
int keyIndex = random.nextInt(nKeys);
String key = keys.get(keyIndex);
Operation op = operations[random.nextInt(operations.length)];
switch (op) {
case ADD:
{
int delta = random.nextInt(10);
multiset.add(key, delta);
deltas[keyIndex] += delta;
break;
}
case SET_COUNT:
{
int newValue = random.nextInt(3);
int oldValue = multiset.setCount(key, newValue);
deltas[keyIndex] += newValue - oldValue;
break;
}
case SET_COUNT_IF:
{
int newValue = random.nextInt(3);
int oldValue = multiset.count(key);
if (multiset.setCount(key, oldValue, newValue)) {
deltas[keyIndex] += newValue - oldValue;
}
break;
}
case REMOVE:
{
int delta = random.nextInt(6); // [0, 5]
int oldValue = multiset.remove(key, delta);
deltas[keyIndex] -= min(delta, oldValue);
break;
}
case REMOVE_EXACTLY:
{
int delta = random.nextInt(5); // [0, 4]
if (multiset.removeExactly(key, delta)) {
deltas[keyIndex] -= delta;
}
break;
}
}
}
return deltas;
}
private | MutateTask |
java | apache__flink | flink-test-utils-parent/flink-test-utils/src/test/java/org/apache/flink/networking/EchoServer.java | {
"start": 2595,
"end": 3782
} | class ____ extends Thread implements AutoCloseable {
private final PrintWriter output;
private final BufferedReader input;
private volatile boolean close;
private Exception threadException;
public EchoWorkerThread(Socket clientSocket, int socketTimeout) throws IOException {
output = new PrintWriter(clientSocket.getOutputStream(), true);
input = new BufferedReader(new InputStreamReader(clientSocket.getInputStream()));
clientSocket.setSoTimeout(socketTimeout);
}
@Override
public void run() {
try {
String inputLine;
while (!close && (inputLine = input.readLine()) != null) {
output.println(inputLine);
}
} catch (IOException e) {
threadException = e;
}
}
@Override
public void close() throws Exception {
close = true;
if (threadException != null) {
throw threadException;
}
input.close();
output.close();
this.join();
}
}
}
| EchoWorkerThread |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/seda/DirectRequestReplyAndSedaInOnlyTest.java | {
"start": 1081,
"end": 2160
} | class ____ extends ContextTestSupport {
@Test
public void testInOut() throws Exception {
getMockEndpoint("mock:log").expectedBodiesReceived("Logging: Bye World");
String out = template.requestBody("direct:start", "Hello World", String.class);
assertEquals("Bye World", out);
log.info("Got reply {}", out);
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// send the message as InOnly to SEDA as we want to continue
// routing
// (as we don't want to do request/reply over SEDA)
// In EIP patterns the WireTap pattern is what this would be
from("direct:start").transform(constant("Bye World")).to(ExchangePattern.InOnly, "seda:log");
from("seda:log").transform(body().prepend("Logging: ")).to("log:log", "mock:log");
}
};
}
}
| DirectRequestReplyAndSedaInOnlyTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java | {
"start": 13905,
"end": 25768
} | class ____ extends MappedFieldType {
private final boolean enabled;
private SourceFieldType(boolean enabled) {
super(NAME, IndexType.NONE, enabled, Collections.emptyMap());
this.enabled = enabled;
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
@Override
public ValueFetcher valueFetcher(SearchExecutionContext context, String format) {
throw new IllegalArgumentException("Cannot fetch values for internal field [" + name() + "].");
}
@Override
public Query existsQuery(SearchExecutionContext context) {
throw new QueryShardException(context, "The _source field is not searchable");
}
@Override
public Query termQuery(Object value, SearchExecutionContext context) {
throw new QueryShardException(context, "The _source field is not searchable");
}
@Override
public BlockLoader blockLoader(BlockLoaderContext blContext) {
if (enabled) {
return new SourceFieldBlockLoader();
}
return BlockLoader.CONSTANT_NULLS;
}
}
// nullable for bwc reasons - TODO: fold this into serializeMode
private final @Nullable Mode mode;
private final boolean serializeMode;
private final boolean sourceModeIsNoop;
private final Explicit<Boolean> enabled;
/** indicates whether the source will always exist and be complete, for use by features like the update API */
private final boolean complete;
private final String[] includes;
private final String[] excludes;
private final SourceFilter sourceFilter;
private SourceFieldMapper(
Mode mode,
Explicit<Boolean> enabled,
String[] includes,
String[] excludes,
boolean serializeMode,
boolean sourceModeIsNoop
) {
super(new SourceFieldType((enabled.explicit() && enabled.value()) || (enabled.explicit() == false && mode != Mode.DISABLED)));
this.mode = mode;
this.enabled = enabled;
this.sourceFilter = buildSourceFilter(includes, excludes);
this.includes = includes;
this.excludes = excludes;
this.complete = stored() && sourceFilter == null;
this.serializeMode = serializeMode;
this.sourceModeIsNoop = sourceModeIsNoop;
}
private static SourceFilter buildSourceFilter(String[] includes, String[] excludes) {
if (CollectionUtils.isEmpty(includes) && CollectionUtils.isEmpty(excludes)) {
return null;
}
return new SourceFilter(includes, excludes);
}
private boolean stored() {
if (enabled.explicit() || mode == null) {
return enabled.value();
}
return mode == Mode.STORED;
}
public boolean enabled() {
if (enabled.explicit()) {
return enabled.value();
}
if (mode != null) {
return mode != Mode.DISABLED;
}
return enabled.value();
}
public boolean isComplete() {
return complete;
}
@Override
public void preParse(DocumentParserContext context) throws IOException {
XContentType contentType = context.sourceToParse().getXContentType();
final var originalSource = context.sourceToParse().source();
final var storedSource = stored() ? removeSyntheticVectorFields(context.mappingLookup(), originalSource, contentType) : null;
final var adaptedStoredSource = applyFilters(context.mappingLookup(), storedSource, contentType, false);
if (adaptedStoredSource != null) {
final BytesRef ref = adaptedStoredSource.toBytesRef();
context.doc().add(new StoredField(fieldType().name(), ref.bytes, ref.offset, ref.length));
}
if (context.indexSettings().isRecoverySourceEnabled() == false) {
// Recovery source is disabled; skip adding recovery source fields.
return;
}
if (context.indexSettings().isRecoverySourceSyntheticEnabled()) {
assert isSynthetic() : "Recovery source should not be disabled for non-synthetic sources";
// Synthetic source recovery is enabled; omit the full recovery source.
// Instead, store only the size of the uncompressed original source.
// This size is used by LuceneSyntheticSourceChangesSnapshot to manage memory usage
// when loading batches of synthetic sources during recovery.
context.doc().add(new NumericDocValuesField(RECOVERY_SOURCE_SIZE_NAME, originalSource.length()));
} else if (stored() == false || adaptedStoredSource != storedSource) {
// If the source is missing (due to synthetic source or disabled mode)
// or has been altered (via source filtering), store a reduced recovery source.
// This includes the original source with synthetic vector fields removed for operation-based recovery.
var recoverySource = removeSyntheticVectorFields(context.mappingLookup(), originalSource, contentType).toBytesRef();
context.doc().add(new StoredField(RECOVERY_SOURCE_NAME, recoverySource.bytes, recoverySource.offset, recoverySource.length));
context.doc().add(new NumericDocValuesField(RECOVERY_SOURCE_NAME, 1));
}
}
/**
* Removes the synthetic vector fields (_inference and synthetic vector fields) from the {@code _source} if it is present.
* These fields are regenerated at query or snapshot recovery time using stored fields and doc values.
*
* <p>For details on how the metadata is re-added, see:</p>
* <ul>
* <li>{@link SearchBasedChangesSnapshot#addSyntheticFields(Source, int)}</li>
* <li>{@link FetchSourcePhase#getProcessor(FetchContext)}</li>
* </ul>
*/
private BytesReference removeSyntheticVectorFields(
MappingLookup mappingLookup,
@Nullable BytesReference originalSource,
@Nullable XContentType contentType
) throws IOException {
if (originalSource == null) {
return null;
}
Set<String> excludes = new HashSet<>();
if (InferenceMetadataFieldsMapper.isEnabled(mappingLookup) && mappingLookup.inferenceFields().isEmpty() == false) {
excludes.add(InferenceMetadataFieldsMapper.NAME);
}
if (excludes.isEmpty() && mappingLookup.syntheticVectorFields().isEmpty()) {
return originalSource;
}
BytesStreamOutput streamOutput = new BytesStreamOutput();
XContentBuilder builder = new XContentBuilder(contentType.xContent(), streamOutput);
try (
XContentParser parser = XContentHelper.createParserNotCompressed(
XContentParserConfiguration.EMPTY.withFiltering(Set.of(), excludes, true),
originalSource,
contentType
)
) {
if ((parser.currentToken() == null) && (parser.nextToken() == null)) {
return originalSource;
}
// Removes synthetic vector fields from the source while preserving empty parent objects,
// ensuring that the fields can later be rehydrated in their original locations.
removeSyntheticVectorFields(builder.generator(), parser, "", mappingLookup.syntheticVectorFields());
return BytesReference.bytes(builder);
}
}
@Nullable
public BytesReference applyFilters(
MappingLookup mappingLookup,
@Nullable BytesReference originalSource,
@Nullable XContentType contentType,
boolean removeMetadataFields
) throws IOException {
if (stored() == false || originalSource == null) {
return null;
}
var modSourceFilter = sourceFilter;
if (removeMetadataFields
&& InferenceMetadataFieldsMapper.isEnabled(mappingLookup)
&& mappingLookup.inferenceFields().isEmpty() == false) {
/*
* Removes the {@link InferenceMetadataFieldsMapper} content from the {@code _source}.
*/
String[] modExcludes = new String[excludes != null ? excludes.length + 1 : 1];
if (excludes != null) {
System.arraycopy(excludes, 0, modExcludes, 0, excludes.length);
}
modExcludes[modExcludes.length - 1] = InferenceMetadataFieldsMapper.NAME;
modSourceFilter = new SourceFilter(includes, modExcludes);
}
if (modSourceFilter != null) {
// Percolate and tv APIs may not set the source and that is ok, because these APIs will not index any data
return Source.fromBytes(originalSource, contentType).filter(modSourceFilter).internalSourceRef();
} else {
return originalSource;
}
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
@Override
public FieldMapper.Builder getMergeBuilder() {
return new Builder(null, Settings.EMPTY, sourceModeIsNoop, false, serializeMode).init(this);
}
public boolean isSynthetic() {
return mode == Mode.SYNTHETIC;
}
/**
* Caution: this function is not aware of the legacy "mappings._source.mode" parameter that some legacy indices might use. You should
* prefer to get information about synthetic source from {@link MapperBuilderContext}.
*/
public static boolean isSynthetic(IndexSettings indexSettings) {
return IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexSettings.getSettings()) == SourceFieldMapper.Mode.SYNTHETIC;
}
public static boolean isStored(IndexSettings indexSettings) {
return IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexSettings.getSettings()) == Mode.STORED;
}
public boolean isDisabled() {
return mode == Mode.DISABLED;
}
public boolean isStored() {
return mode == null || mode == Mode.STORED;
}
public static boolean onOrAfterDeprecateModeVersion(IndexVersion version) {
return version.onOrAfter(IndexVersions.DEPRECATE_SOURCE_MODE_MAPPER)
|| version.between(IndexVersions.V8_DEPRECATE_SOURCE_MODE_MAPPER, IndexVersions.UPGRADE_TO_LUCENE_10_0_0);
}
private static void removeSyntheticVectorFields(
XContentGenerator destination,
XContentParser parser,
String fullPath,
Set<String> patchFullPaths
) throws IOException {
XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.FIELD_NAME) {
String fieldName = parser.currentName();
token = parser.nextToken();
fullPath = fullPath + (fullPath.isEmpty() ? "" : ".") + fieldName;
if (patchFullPaths.contains(fullPath)) {
parser.skipChildren();
return;
}
destination.writeFieldName(fieldName);
}
switch (token) {
case START_ARRAY -> {
destination.writeStartArray();
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
removeSyntheticVectorFields(destination, parser, fullPath, patchFullPaths);
}
destination.writeEndArray();
}
case START_OBJECT -> {
destination.writeStartObject();
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
removeSyntheticVectorFields(destination, parser, fullPath, patchFullPaths);
}
destination.writeEndObject();
}
default -> // others are simple:
destination.copyCurrentEvent(parser);
}
}
}
| SourceFieldType |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/dynamic/support/ResolvableType.java | {
"start": 47405,
"end": 48262
} | class ____ implements VariableResolver {
private final TypeVariable<?>[] variables;
private final ResolvableType[] generics;
public TypeVariablesVariableResolver(TypeVariable<?>[] variables, ResolvableType[] generics) {
this.variables = variables;
this.generics = generics;
}
@Override
public ResolvableType resolveVariable(TypeVariable<?> variable) {
for (int i = 0; i < this.variables.length; i++) {
if (TypeWrapper.unwrap(this.variables[i]).equals(TypeWrapper.unwrap(variable))) {
return this.generics[i];
}
}
return null;
}
@Override
public Object getSource() {
return this.generics;
}
}
private static final | TypeVariablesVariableResolver |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/filter/TestMapFiltering.java | {
"start": 4901,
"end": 9685
} | class ____ extends LinkedHashMap<String,String> {
public StringMap497 add(String key, String value) {
put(key, value);
return this;
}
}
/*
/**********************************************************
/* Unit tests
/**********************************************************
*/
final ObjectMapper MAPPER = newJsonMapper();
@Test
public void testMapFilteringViaProps() throws Exception
{
FilterProvider prov = new SimpleFilterProvider().addFilter("filterX",
SimpleBeanPropertyFilter.filterOutAllExcept("b"));
String json = MAPPER.writer(prov).writeValueAsString(new MapBean());
assertEquals(a2q("{'values':{'b':5}}"), json);
}
@Test
public void testMapFilteringViaClass() throws Exception
{
FilteredBean bean = new FilteredBean();
bean.put("a", 4);
bean.put("b", 3);
FilterProvider prov = new SimpleFilterProvider().addFilter("filterForMaps",
SimpleBeanPropertyFilter.filterOutAllExcept("b"));
String json = MAPPER.writer(prov).writeValueAsString(bean);
assertEquals(a2q("{'b':3}"), json);
}
// [databind#527]
@Test
public void testNonNullValueMapViaProp() throws IOException
{
String json = MAPPER.writeValueAsString(new NoNullValuesMapContainer()
.add("a", "foo")
.add("b", null)
.add("c", "bar"));
assertEquals(a2q("{'stuff':{'a':'foo','c':'bar'}}"), json);
}
// [databind#522]
@Test
public void testMapFilteringWithAnnotations() throws Exception
{
FilterProvider prov = new SimpleFilterProvider().addFilter("filterX",
new TestMapFilter());
String json = MAPPER.writer(prov).writeValueAsString(new MapBean());
// a=1 should become a=2
assertEquals(a2q("{'values':{'a':2}}"), json);
// and then one without annotation as contrast
json = MAPPER.writer(prov).writeValueAsString(new MapBeanNoOffset());
assertEquals(a2q("{'values':{'a':1}}"), json);
}
// [databind#527]
@Test
public void testMapNonNullValue() throws IOException
{
String json = MAPPER.writeValueAsString(new NoNullsStringMap()
.add("a", "foo")
.add("b", null)
.add("c", "bar"));
assertEquals(a2q("{'a':'foo','c':'bar'}"), json);
}
// [databind#527]
@Test
public void testMapNonEmptyValue() throws IOException
{
String json = MAPPER.writeValueAsString(new NoEmptyStringsMap()
.add("a", "foo")
.add("b", "bar")
.add("c", ""));
assertEquals(a2q("{'a':'foo','b':'bar'}"), json);
}
// Test to ensure absent content of AtomicReference handled properly
// [databind#527]
@Test
public void testMapAbsentValue() throws IOException
{
String json = MAPPER.writeValueAsString(new NoAbsentStringMap()
.add("a", "foo")
.add("b", null));
assertEquals(a2q("{'a':'foo'}"), json);
}
// [databind#527]
@Test
public void testMapWithOnlyEmptyValues() throws IOException
{
String json;
// First, non empty:
json = MAPPER.writeValueAsString(new Wrapper497(new StringMap497()
.add("a", "123")));
assertEquals(a2q("{'values':{'a':'123'}}"), json);
// then empty
json = MAPPER.writeValueAsString(new Wrapper497(new StringMap497()
.add("a", "")
.add("b", null)));
assertEquals(a2q("{}"), json);
}
@Test
public void testMapViaGlobalNonEmpty() throws Exception
{
// basic Map<String,String> subclass:
ObjectMapper mapper = jsonMapperBuilder()
.changeDefaultPropertyInclusion(incl -> incl
.withContentInclusion(JsonInclude.Include.NON_EMPTY))
.build();
assertEquals(a2q("{'a':'b'}"), mapper.writeValueAsString(
new StringMap497()
.add("x", "")
.add("a", "b")
));
}
@Test
public void testMapViaTypeOverride() throws Exception
{
// basic Map<String,String> subclass:
ObjectMapper mapper = jsonMapperBuilder()
.withConfigOverride(Map.class,
o -> o.setInclude(JsonInclude.Value.empty()
.withContentInclusion(JsonInclude.Include.NON_EMPTY)))
.build();
assertEquals(a2q("{'a':'b'}"), mapper.writeValueAsString(
new StringMap497()
.add("foo", "")
.add("a", "b")
));
}
}
| StringMap497 |
java | quarkusio__quarkus | extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/telemetry/MicrometerWebSocketsStandaloneTest.java | {
"start": 5692,
"end": 5845
} | class ____ {
@OnOpen()
public void onOpen() {
Log.info("client onOpen");
}
}
}
| ErroneousClient_OnConnectErrorHandler |
java | micronaut-projects__micronaut-core | core-reactive/src/main/java/io/micronaut/core/async/publisher/Publishers.java | {
"start": 19523,
"end": 19953
} | interface ____<T, R> {
/**
* Maps next result.
*
* @param result The next value.
* @return The mapped value.
*/
@NonNull
R map(@NonNull T result);
/**
* Supplies an empty value if there is no next value.
*
* @return The result.
*/
@NonNull
R supplyEmpty();
}
/**
* Marker | MapOrSupplyEmpty |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/FuturesGetCheckedIllegalExceptionTypeTest.java | {
"start": 3490,
"end": 3608
} | class ____ extends Exception {
public OtherParameterTypeException(int it) {}
}
public | OtherParameterTypeException |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/util/OpenHashSet.java | {
"start": 962,
"end": 4540
} | class ____<T> {
private static final int INT_PHI = 0x9E3779B9;
final float loadFactor;
int mask;
int size;
int maxSize;
T[] keys;
public OpenHashSet() {
this(16, 0.75f);
}
/**
* Creates an OpenHashSet with the initial capacity and load factor of 0.75f.
* @param capacity the initial capacity
*/
public OpenHashSet(int capacity) {
this(capacity, 0.75f);
}
@SuppressWarnings("unchecked")
public OpenHashSet(int capacity, float loadFactor) {
this.loadFactor = loadFactor;
int c = Pow2.roundToPowerOfTwo(capacity);
this.mask = c - 1;
this.maxSize = (int)(loadFactor * c);
this.keys = (T[])new Object[c];
}
public boolean add(T value) {
final T[] a = keys;
final int m = mask;
int pos = mix(value.hashCode()) & m;
T curr = a[pos];
if (curr != null) {
if (curr.equals(value)) {
return false;
}
for (;;) {
pos = (pos + 1) & m;
curr = a[pos];
if (curr == null) {
break;
}
if (curr.equals(value)) {
return false;
}
}
}
a[pos] = value;
if (++size >= maxSize) {
rehash();
}
return true;
}
public boolean remove(T value) {
T[] a = keys;
int m = mask;
int pos = mix(value.hashCode()) & m;
T curr = a[pos];
if (curr == null) {
return false;
}
if (curr.equals(value)) {
return removeEntry(pos, a, m);
}
for (;;) {
pos = (pos + 1) & m;
curr = a[pos];
if (curr == null) {
return false;
}
if (curr.equals(value)) {
return removeEntry(pos, a, m);
}
}
}
boolean removeEntry(int pos, T[] a, int m) {
size--;
int last;
int slot;
T curr;
for (;;) {
last = pos;
pos = (pos + 1) & m;
for (;;) {
curr = a[pos];
if (curr == null) {
a[last] = null;
return true;
}
slot = mix(curr.hashCode()) & m;
if (last <= pos ? last >= slot || slot > pos : last >= slot && slot > pos) {
break;
}
pos = (pos + 1) & m;
}
a[last] = curr;
}
}
@SuppressWarnings("unchecked")
void rehash() {
T[] a = keys;
int i = a.length;
int newCap = i << 1;
int m = newCap - 1;
T[] b = (T[])new Object[newCap];
for (int j = size; j-- != 0; ) {
while (a[--i] == null) { } // NOPMD
int pos = mix(a[i].hashCode()) & m;
if (b[pos] != null) {
for (;;) {
pos = (pos + 1) & m;
if (b[pos] == null) {
break;
}
}
}
b[pos] = a[i];
}
this.mask = m;
this.maxSize = (int)(newCap * loadFactor);
this.keys = b;
}
static int mix(int x) {
final int h = x * INT_PHI;
return h ^ (h >>> 16);
}
public Object[] keys() {
return keys; // NOPMD
}
public int size() {
return size;
}
}
| OpenHashSet |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java | {
"start": 2130,
"end": 4782
} | class ____ implements ToXContentFragment {
private final Counts counts;
private final Set<String> versions;
private final OsStats os;
private final ProcessStats process;
private final JvmStats jvm;
private final FsInfo.Path fs;
private final Set<PluginRuntimeInfo> plugins;
private final NetworkTypes networkTypes;
private final DiscoveryTypes discoveryTypes;
private final PackagingTypes packagingTypes;
private final IngestStats ingestStats;
private final IndexPressureStats indexPressureStats;
ClusterStatsNodes(List<ClusterStatsNodeResponse> nodeResponses) {
this.versions = new HashSet<>();
this.plugins = new HashSet<>();
ClusterFsStatsDeduplicator deduplicator = new ClusterFsStatsDeduplicator(nodeResponses.size());
List<NodeInfo> nodeInfos = new ArrayList<>(nodeResponses.size());
List<NodeStats> nodeStats = new ArrayList<>(nodeResponses.size());
for (ClusterStatsNodeResponse nodeResponse : nodeResponses) {
nodeInfos.add(nodeResponse.nodeInfo());
nodeStats.add(nodeResponse.nodeStats());
this.versions.add(nodeResponse.nodeInfo().getVersion());
this.plugins.addAll(nodeResponse.nodeInfo().getInfo(PluginsAndModules.class).getPluginInfos());
TransportAddress publishAddress = nodeResponse.nodeInfo().getInfo(TransportInfo.class).address().publishAddress();
final InetAddress inetAddress = publishAddress.address().getAddress();
deduplicator.add(inetAddress, nodeResponse.nodeStats().getFs());
}
this.fs = deduplicator.getTotal();
this.counts = new Counts(nodeInfos);
this.os = new OsStats(nodeInfos, nodeStats);
this.process = new ProcessStats(nodeStats);
this.jvm = new JvmStats(nodeInfos, nodeStats);
this.networkTypes = new NetworkTypes(nodeInfos);
this.discoveryTypes = new DiscoveryTypes(nodeInfos);
this.packagingTypes = new PackagingTypes(nodeInfos);
this.ingestStats = new IngestStats(nodeStats);
this.indexPressureStats = new IndexPressureStats(nodeStats);
}
public Counts getCounts() {
return this.counts;
}
public Set<String> getVersions() {
return versions;
}
public OsStats getOs() {
return os;
}
public ProcessStats getProcess() {
return process;
}
public JvmStats getJvm() {
return jvm;
}
public FsInfo.Path getFs() {
return fs;
}
public Set<PluginRuntimeInfo> getPlugins() {
return plugins;
}
static final | ClusterStatsNodes |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java | {
"start": 18643,
"end": 21610
} | class ____ implements BootstrapCheck {
static final long LIMIT = 1 << 18;
@Override
public BootstrapCheckResult check(final BootstrapContext context) {
// we only enforce the check if a store is allowed to use mmap at all
if (IndexModule.NODE_STORE_ALLOW_MMAP.get(context.settings())) {
if (getMaxMapCount() != -1 && getMaxMapCount() < LIMIT) {
final String message = String.format(
Locale.ROOT,
"max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]",
getMaxMapCount(),
LIMIT
);
return BootstrapCheckResult.failure(message);
} else {
return BootstrapCheckResult.success();
}
} else {
return BootstrapCheckResult.success();
}
}
// visible for testing
long getMaxMapCount() {
return getMaxMapCount(LogManager.getLogger(BootstrapChecks.class));
}
// visible for testing
long getMaxMapCount(Logger logger) {
final Path path = getProcSysVmMaxMapCountPath();
try (BufferedReader bufferedReader = getBufferedReader(path)) {
final String rawProcSysVmMaxMapCount = readProcSysVmMaxMapCount(bufferedReader);
if (rawProcSysVmMaxMapCount != null) {
try {
return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
} catch (final NumberFormatException e) {
logger.warn(() -> "unable to parse vm.max_map_count [" + rawProcSysVmMaxMapCount + "]", e);
}
}
} catch (final IOException e) {
logger.warn(() -> "I/O exception while trying to read [" + path + "]", e);
}
return -1;
}
@SuppressForbidden(reason = "access /proc/sys/vm/max_map_count")
private static Path getProcSysVmMaxMapCountPath() {
return PathUtils.get("/proc/sys/vm/max_map_count");
}
// visible for testing
BufferedReader getBufferedReader(final Path path) throws IOException {
return Files.newBufferedReader(path);
}
// visible for testing
static String readProcSysVmMaxMapCount(final BufferedReader bufferedReader) throws IOException {
return bufferedReader.readLine();
}
// visible for testing
static long parseProcSysVmMaxMapCount(final String procSysVmMaxMapCount) throws NumberFormatException {
return Long.parseLong(procSysVmMaxMapCount);
}
@Override
public ReferenceDocs referenceDocs() {
return ReferenceDocs.BOOTSTRAP_CHECK_MAXIMUM_MAP_COUNT;
}
}
static | MaxMapCountCheck |
java | google__dagger | javatests/dagger/hilt/android/ViewModelSavedStateOwnerTest.java | {
"start": 9716,
"end": 9949
} | class ____ extends Hilt_ViewModelSavedStateOwnerTest_TestFragment {
@Override
public void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
}
}
@HiltViewModel
static | TestFragment |
java | spring-projects__spring-boot | module/spring-boot-micrometer-metrics/src/test/java/org/springframework/boot/micrometer/metrics/autoconfigure/jvm/JvmMetricsAutoConfigurationTests.java | {
"start": 2399,
"end": 6501
} | class ____ {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withBean(MeterRegistry.class, () -> new SimpleMeterRegistry())
.withConfiguration(AutoConfigurations.of(JvmMetricsAutoConfiguration.class));
@Test
void autoConfiguresJvmMetrics() {
this.contextRunner.run(assertMetricsBeans());
}
@Test
void allowsCustomJvmGcMetricsToBeUsed() {
this.contextRunner.withUserConfiguration(CustomJvmGcMetricsConfiguration.class)
.run(assertMetricsBeans().andThen((context) -> assertThat(context).hasBean("customJvmGcMetrics")));
}
@Test
void allowsCustomJvmHeapPressureMetricsToBeUsed() {
this.contextRunner.withUserConfiguration(CustomJvmHeapPressureMetricsConfiguration.class)
.run(assertMetricsBeans()
.andThen((context) -> assertThat(context).hasBean("customJvmHeapPressureMetrics")));
}
@Test
void allowsCustomJvmMemoryMetricsToBeUsed() {
this.contextRunner.withUserConfiguration(CustomJvmMemoryMetricsConfiguration.class)
.run(assertMetricsBeans().andThen((context) -> assertThat(context).hasBean("customJvmMemoryMetrics")));
}
@Test
void allowsCustomJvmThreadMetricsToBeUsed() {
this.contextRunner.withUserConfiguration(CustomJvmThreadMetricsConfiguration.class)
.run(assertMetricsBeans().andThen((context) -> assertThat(context).hasBean("customJvmThreadMetrics")));
}
@Test
void allowsCustomClassLoaderMetricsToBeUsed() {
this.contextRunner.withUserConfiguration(CustomClassLoaderMetricsConfiguration.class)
.run(assertMetricsBeans().andThen((context) -> assertThat(context).hasBean("customClassLoaderMetrics")));
}
@Test
void allowsCustomJvmInfoMetricsToBeUsed() {
this.contextRunner.withUserConfiguration(CustomJvmInfoMetricsConfiguration.class)
.run(assertMetricsBeans().andThen((context) -> assertThat(context).hasBean("customJvmInfoMetrics")));
}
@Test
void allowsCustomJvmCompilationMetricsToBeUsed() {
this.contextRunner.withUserConfiguration(CustomJvmCompilationMetricsConfiguration.class)
.run(assertMetricsBeans().andThen((context) -> assertThat(context).hasBean("customJvmCompilationMetrics")));
}
@Test
@EnabledForJreRange(min = JRE.JAVA_21)
void autoConfiguresJvmMetricsWithVirtualThreadsMetrics() {
this.contextRunner.run(assertMetricsBeans()
.andThen((context) -> assertThat(context).hasSingleBean(getVirtualThreadMetricsClass())));
}
@Test
@EnabledForJreRange(min = JRE.JAVA_21)
void allowCustomVirtualThreadMetricsToBeUsed() {
Class<MeterBinder> virtualThreadMetricsClass = getVirtualThreadMetricsClass();
this.contextRunner
.withBean("customVirtualThreadMetrics", virtualThreadMetricsClass,
() -> BeanUtils.instantiateClass(virtualThreadMetricsClass))
.run(assertMetricsBeans()
.andThen((context) -> assertThat(context).hasSingleBean(getVirtualThreadMetricsClass())
.hasBean("customVirtualThreadMetrics")));
}
@Test
@EnabledForJreRange(min = JRE.JAVA_21)
void shouldRegisterVirtualThreadMetricsRuntimeHints() {
RuntimeHints hints = new RuntimeHints();
new JvmMetricsAutoConfiguration.VirtualThreadMetricsRuntimeHintsRegistrar().registerHints(hints,
getClass().getClassLoader());
assertThat(RuntimeHintsPredicates.reflection()
.onType(TypeReference.of(getVirtualThreadMetricsClass()))
.withMemberCategories(MemberCategory.INVOKE_PUBLIC_CONSTRUCTORS)).accepts(hints);
}
private ContextConsumer<AssertableApplicationContext> assertMetricsBeans() {
return (context) -> assertThat(context).hasSingleBean(JvmGcMetrics.class)
.hasSingleBean(JvmHeapPressureMetrics.class)
.hasSingleBean(JvmMemoryMetrics.class)
.hasSingleBean(JvmThreadMetrics.class)
.hasSingleBean(ClassLoaderMetrics.class)
.hasSingleBean(JvmInfoMetrics.class)
.hasSingleBean(JvmCompilationMetrics.class);
}
@SuppressWarnings("unchecked")
private static Class<MeterBinder> getVirtualThreadMetricsClass() {
return (Class<MeterBinder>) ClassUtils
.resolveClassName("io.micrometer.java21.instrument.binder.jdk.VirtualThreadMetrics", null);
}
@Configuration(proxyBeanMethods = false)
static | JvmMetricsAutoConfigurationTests |
java | apache__rocketmq | common/src/main/java/org/apache/rocketmq/common/filter/FilterContext.java | {
"start": 854,
"end": 1096
} | class ____ {
private String consumerGroup;
public String getConsumerGroup() {
return consumerGroup;
}
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
}
| FilterContext |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/result/condition/RequestConditionHolder.java | {
"start": 1016,
"end": 1525
} | class ____ also an implementation of {@code RequestCondition}, effectively it
* decorates the held request condition and allows it to be combined and compared
* with other request conditions in a type and null safe way.
*
* <p>When two {@code RequestConditionHolder} instances are combined or compared
* with each other, it is expected the conditions they hold are of the same type.
* If they are not, a {@link ClassCastException} is raised.
*
* @author Rossen Stoyanchev
* @since 5.0
*/
public final | is |
java | hibernate__hibernate-orm | hibernate-spatial/src/test/java/org/hibernate/spatial/dialect/postgis/PostgisTest.java | {
"start": 3030,
"end": 3527
} | class ____ {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
private Long id;
private String name;
private Point<C2D> location;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Point getLocation() {
return location;
}
public void setLocation(Point location) {
this.location = location;
}
}
}
| Event |
java | elastic__elasticsearch | client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java | {
"start": 4330,
"end": 21773
} | class ____ extends RestClientTestCase {
private static final Log logger = LogFactory.getLog(RestClientSingleHostTests.class);
private ExecutorService exec = Executors.newFixedThreadPool(1);
private RestClient restClient;
private Header[] defaultHeaders;
private Node node;
private CloseableHttpAsyncClient httpClient;
private HostsTrackingFailureListener failureListener;
private boolean strictDeprecationMode;
@Before
public void createRestClient() {
httpClient = mockHttpClient(exec);
defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default");
node = new Node(new HttpHost("localhost", 9200));
failureListener = new HostsTrackingFailureListener();
strictDeprecationMode = randomBoolean();
restClient = new RestClient(
this.httpClient,
defaultHeaders,
singletonList(node),
null,
failureListener,
NodeSelector.ANY,
strictDeprecationMode,
false,
false
);
}
@SuppressWarnings("unchecked")
static CloseableHttpAsyncClient mockHttpClient(final ExecutorService exec) {
CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class);
when(
httpClient.<HttpResponse>execute(
any(HttpAsyncRequestProducer.class),
any(HttpAsyncResponseConsumer.class),
any(HttpClientContext.class),
nullable(FutureCallback.class)
)
).thenAnswer((Answer<Future<HttpResponse>>) invocationOnMock -> {
final HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0];
final FutureCallback<HttpResponse> futureCallback = (FutureCallback<HttpResponse>) invocationOnMock.getArguments()[3];
// Call the callback asynchronous to better simulate how async http client works
return exec.submit(() -> {
if (futureCallback != null) {
try {
HttpResponse httpResponse = responseOrException(requestProducer);
futureCallback.completed(httpResponse);
} catch (Exception e) {
futureCallback.failed(e);
}
return null;
}
return responseOrException(requestProducer);
});
});
return httpClient;
}
private static HttpResponse responseOrException(HttpAsyncRequestProducer requestProducer) throws Exception {
final HttpUriRequest request = (HttpUriRequest) requestProducer.generateRequest();
final HttpHost httpHost = requestProducer.getTarget();
// return the desired status code or exception depending on the path
switch (request.getURI().getPath()) {
case "/soe":
throw new SocketTimeoutException(httpHost.toString());
case "/coe":
throw new ConnectTimeoutException(httpHost.toString());
case "/ioe":
throw new IOException(httpHost.toString());
case "/closed":
throw new ConnectionClosedException();
case "/handshake":
throw new SSLHandshakeException("");
case "/uri":
throw new URISyntaxException("", "");
case "/runtime":
throw new RuntimeException();
default:
int statusCode = Integer.parseInt(request.getURI().getPath().substring(1));
StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, "");
final HttpResponse httpResponse = new BasicHttpResponse(statusLine);
// return the same body that was sent
if (request instanceof HttpEntityEnclosingRequest) {
HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity();
if (entity != null) {
assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable());
httpResponse.setEntity(entity);
}
}
// return the same headers that were sent
httpResponse.setHeaders(request.getAllHeaders());
return httpResponse;
}
}
/**
* Shutdown the executor so we don't leak threads into other test runs.
*/
@After
public void shutdownExec() {
exec.shutdown();
}
/**
* Verifies the content of the {@link HttpRequest} that's internally created and passed through to the http client
*/
@SuppressWarnings("unchecked")
public void testInternalHttpRequest() throws Exception {
ArgumentCaptor<HttpAsyncRequestProducer> requestArgumentCaptor = ArgumentCaptor.forClass(HttpAsyncRequestProducer.class);
int times = 0;
for (String httpMethod : getHttpMethods()) {
HttpUriRequest expectedRequest = performRandomRequest(httpMethod);
verify(httpClient, times(++times)).<HttpResponse>execute(
requestArgumentCaptor.capture(),
any(HttpAsyncResponseConsumer.class),
any(HttpClientContext.class),
nullable(FutureCallback.class)
);
HttpUriRequest actualRequest = (HttpUriRequest) requestArgumentCaptor.getValue().generateRequest();
assertEquals(expectedRequest.getURI(), actualRequest.getURI());
assertEquals(expectedRequest.getClass(), actualRequest.getClass());
assertArrayEquals(expectedRequest.getAllHeaders(), actualRequest.getAllHeaders());
if (expectedRequest instanceof HttpEntityEnclosingRequest) {
HttpEntity expectedEntity = ((HttpEntityEnclosingRequest) expectedRequest).getEntity();
if (expectedEntity != null) {
HttpEntity actualEntity = ((HttpEntityEnclosingRequest) actualRequest).getEntity();
assertEquals(EntityUtils.toString(expectedEntity), EntityUtils.toString(actualEntity));
}
}
}
}
/**
* End to end test for ok status codes
*/
public void testOkStatusCodes() throws Exception {
for (String method : getHttpMethods()) {
for (int okStatusCode : getOkStatusCodes()) {
Response response = performRequestSyncOrAsync(restClient, new Request(method, "/" + okStatusCode));
assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
}
}
failureListener.assertNotCalled();
}
/**
* End to end test for error status codes: they should cause an exception to be thrown, apart from 404 with HEAD requests
*/
public void testErrorStatusCodes() throws Exception {
for (String method : getHttpMethods()) {
Set<Integer> expectedIgnores = new HashSet<>();
String ignoreParam = "";
if (HttpHead.METHOD_NAME.equals(method)) {
expectedIgnores.add(404);
}
if (randomBoolean()) {
int numIgnores = randomIntBetween(1, 3);
for (int i = 0; i < numIgnores; i++) {
Integer code = randomFrom(getAllErrorStatusCodes());
expectedIgnores.add(code);
ignoreParam += code;
if (i < numIgnores - 1) {
ignoreParam += ",";
}
}
}
// error status codes should cause an exception to be thrown
for (int errorStatusCode : getAllErrorStatusCodes()) {
try {
Request request = new Request(method, "/" + errorStatusCode);
if (false == ignoreParam.isEmpty()) {
// literal "ignore" rather than IGNORE_RESPONSE_CODES_PARAM since this is something on which callers might rely
request.addParameter("ignore", ignoreParam);
}
Response response = restClient.performRequest(request);
if (expectedIgnores.contains(errorStatusCode)) {
// no exception gets thrown although we got an error status code, as it was configured to be ignored
assertEquals(errorStatusCode, response.getStatusLine().getStatusCode());
} else {
fail("request should have failed");
}
} catch (ResponseException e) {
if (expectedIgnores.contains(errorStatusCode)) {
throw e;
}
assertEquals(errorStatusCode, e.getResponse().getStatusLine().getStatusCode());
assertExceptionStackContainsCallingMethod(e);
}
if (errorStatusCode <= 500 || expectedIgnores.contains(errorStatusCode)) {
failureListener.assertNotCalled();
} else {
failureListener.assertCalled(singletonList(node));
}
}
}
}
public void testPerformRequestIOExceptions() throws Exception {
for (String method : getHttpMethods()) {
// IOExceptions should be let bubble up
try {
restClient.performRequest(new Request(method, "/ioe"));
fail("request should have failed");
} catch (IOException e) {
// And we do all that so the thrown exception has our method in the stacktrace
assertExceptionStackContainsCallingMethod(e);
}
failureListener.assertCalled(singletonList(node));
try {
restClient.performRequest(new Request(method, "/coe"));
fail("request should have failed");
} catch (ConnectTimeoutException e) {
// And we do all that so the thrown exception has our method in the stacktrace
assertExceptionStackContainsCallingMethod(e);
}
failureListener.assertCalled(singletonList(node));
try {
restClient.performRequest(new Request(method, "/soe"));
fail("request should have failed");
} catch (SocketTimeoutException e) {
// And we do all that so the thrown exception has our method in the stacktrace
assertExceptionStackContainsCallingMethod(e);
}
failureListener.assertCalled(singletonList(node));
try {
restClient.performRequest(new Request(method, "/closed"));
fail("request should have failed");
} catch (ConnectionClosedException e) {
// And we do all that so the thrown exception has our method in the stacktrace
assertExceptionStackContainsCallingMethod(e);
}
failureListener.assertCalled(singletonList(node));
try {
restClient.performRequest(new Request(method, "/handshake"));
fail("request should have failed");
} catch (SSLHandshakeException e) {
// And we do all that so the thrown exception has our method in the stacktrace
assertExceptionStackContainsCallingMethod(e);
}
failureListener.assertCalled(singletonList(node));
}
}
public void testPerformRequestRuntimeExceptions() throws Exception {
for (String method : getHttpMethods()) {
try {
restClient.performRequest(new Request(method, "/runtime"));
fail("request should have failed");
} catch (RuntimeException e) {
// And we do all that so the thrown exception has our method in the stacktrace
assertExceptionStackContainsCallingMethod(e);
}
failureListener.assertCalled(singletonList(node));
}
}
public void testPerformRequestExceptions() throws Exception {
for (String method : getHttpMethods()) {
try {
restClient.performRequest(new Request(method, "/uri"));
fail("request should have failed");
} catch (RuntimeException e) {
assertThat(e.getCause(), instanceOf(URISyntaxException.class));
// And we do all that so the thrown exception has our method in the stacktrace
assertExceptionStackContainsCallingMethod(e);
}
failureListener.assertCalled(singletonList(node));
}
}
/**
* End to end test for request and response body. Exercises the mock http client ability to send back
* whatever body it has received.
*/
public void testBody() throws Exception {
String body = "{ \"field\": \"value\" }";
StringEntity entity = new StringEntity(body, ContentType.APPLICATION_JSON);
for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) {
for (int okStatusCode : getOkStatusCodes()) {
Request request = new Request(method, "/" + okStatusCode);
request.setEntity(entity);
Response response = restClient.performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
assertThat(EntityUtils.toString(response.getEntity()), equalTo(body));
}
for (int errorStatusCode : getAllErrorStatusCodes()) {
Request request = new Request(method, "/" + errorStatusCode);
request.setEntity(entity);
try {
restClient.performRequest(request);
fail("request should have failed");
} catch (ResponseException e) {
Response response = e.getResponse();
assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode));
assertThat(EntityUtils.toString(response.getEntity()), equalTo(body));
assertExceptionStackContainsCallingMethod(e);
}
}
}
for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) {
Request request = new Request(method, "/" + randomStatusCode(getRandom()));
request.setEntity(entity);
try {
performRequestSyncOrAsync(restClient, request);
fail("request should have failed");
} catch (UnsupportedOperationException e) {
assertThat(e.getMessage(), equalTo(method + " with body is not supported"));
}
}
}
/**
* End to end test for request and response headers. Exercises the mock http client ability to send back
* whatever headers it has received.
*/
public void testHeaders() throws Exception {
for (String method : getHttpMethods()) {
final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header");
final int statusCode = randomStatusCode(getRandom());
Request request = new Request(method, "/" + statusCode);
RequestOptions.Builder options = request.getOptions().toBuilder();
for (Header requestHeader : requestHeaders) {
options.addHeader(requestHeader.getName(), requestHeader.getValue());
}
request.setOptions(options);
Response esResponse;
try {
esResponse = performRequestSyncOrAsync(restClient, request);
} catch (ResponseException e) {
esResponse = e.getResponse();
}
assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
assertHeaders(defaultHeaders, requestHeaders, esResponse.getHeaders(), Collections.<String>emptySet());
assertFalse(esResponse.hasWarnings());
}
}
public void testDeprecationWarnings() throws Exception {
String chars = randomAsciiAlphanumOfLength(5);
assertDeprecationWarnings(singletonList("poorly formatted " + chars), singletonList("poorly formatted " + chars));
assertDeprecationWarnings(singletonList(formatWarningWithoutDate(chars)), singletonList(chars));
assertDeprecationWarnings(singletonList(formatWarning(chars)), singletonList(chars));
assertDeprecationWarnings(
Arrays.asList(formatWarning(chars), "another one", "and another"),
Arrays.asList(chars, "another one", "and another")
);
assertDeprecationWarnings(Arrays.asList("ignorable one", "and another"), Arrays.asList("ignorable one", "and another"));
assertDeprecationWarnings(singletonList("exact"), singletonList("exact"));
assertDeprecationWarnings(Collections.<String>emptyList(), Collections.<String>emptyList());
String proxyWarning = "112 - \"network down\" \"Sat, 25 Aug 2012 23:34:45 GMT\"";
assertDeprecationWarnings(singletonList(proxyWarning), singletonList(proxyWarning));
}
private | RestClientSingleHostTests |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/MockAggregations.java | {
"start": 1104,
"end": 4177
} | class ____ {
public static StringTerms mockTerms(String name) {
return mockTerms(name, Collections.emptyList(), 0);
}
public static StringTerms mockTerms(String name, List<StringTerms.Bucket> buckets, long sumOfOtherDocCounts) {
StringTerms agg = mock(StringTerms.class);
when(agg.getName()).thenReturn(name);
doReturn(buckets).when(agg).getBuckets();
when(agg.getSumOfOtherDocCounts()).thenReturn(sumOfOtherDocCounts);
return agg;
}
public static StringTerms.Bucket mockTermsBucket(String key, InternalAggregations subAggs) {
StringTerms.Bucket bucket = mock(StringTerms.Bucket.class);
when(bucket.getKeyAsString()).thenReturn(key);
when(bucket.getAggregations()).thenReturn(subAggs);
return bucket;
}
public static InternalFilters mockFilters(String name) {
return mockFilters(name, Collections.emptyList());
}
public static InternalFilters mockFilters(String name, List<Filters.Bucket> buckets) {
InternalFilters agg = mock(InternalFilters.class);
when(agg.getName()).thenReturn(name);
doReturn(buckets).when(agg).getBuckets();
return agg;
}
public static InternalFilters.InternalBucket mockFiltersBucket(String key, long docCount, InternalAggregations subAggs) {
InternalFilters.InternalBucket bucket = mockFiltersBucket(key, docCount);
when(bucket.getAggregations()).thenReturn(subAggs);
return bucket;
}
public static InternalFilters.InternalBucket mockFiltersBucket(String key, long docCount) {
InternalFilters.InternalBucket bucket = mock(InternalFilters.InternalBucket.class);
when(bucket.getKeyAsString()).thenReturn(key);
when(bucket.getDocCount()).thenReturn(docCount);
return bucket;
}
public static InternalFilter mockFilter(String name, long docCount) {
InternalFilter agg = mock(InternalFilter.class);
when(agg.getName()).thenReturn(name);
when(agg.getDocCount()).thenReturn(docCount);
return agg;
}
public static InternalNumericMetricsAggregation.SingleValue mockSingleValue(String name, double value) {
InternalNumericMetricsAggregation.SingleValue agg = mock(InternalNumericMetricsAggregation.SingleValue.class);
when(agg.getName()).thenReturn(name);
when(agg.value()).thenReturn(value);
return agg;
}
public static InternalCardinality mockCardinality(String name, long value) {
InternalCardinality agg = mock(InternalCardinality.class);
when(agg.getName()).thenReturn(name);
when(agg.getValue()).thenReturn(value);
return agg;
}
public static InternalExtendedStats mockExtendedStats(String name, double variance, long count) {
InternalExtendedStats agg = mock(InternalExtendedStats.class);
when(agg.getName()).thenReturn(name);
when(agg.getVariance()).thenReturn(variance);
when(agg.getCount()).thenReturn(count);
return agg;
}
}
| MockAggregations |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FtpChangedRootDirReadLockFastExistCheckIT.java | {
"start": 882,
"end": 1200
} | class ____ extends FtpChangedRootDirReadLockIT {
@Override
protected String getFtpUrl() {
return "ftp://admin@localhost:{{ftp.server.port}}"
+ "/?password=admin&readLock=changed&readLockCheckInterval=1000&delete=true&fastExistsCheck=true";
}
}
| FtpChangedRootDirReadLockFastExistCheckIT |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/util/typeutils/FieldAccessor.java | {
"start": 9444,
"end": 10242
} | class ____ transient and when deserializing its value we also make it
// accessible
throw new RuntimeException(
"This should not happen since we call setAccesssible(true) in readObject."
+ " fields: "
+ field
+ " obj: "
+ pojo);
}
}
@Override
public T set(T pojo, F valueToSet) {
try {
@SuppressWarnings("unchecked")
final R inner = (R) field.get(pojo);
field.set(pojo, innerAccessor.set(inner, valueToSet));
return pojo;
} catch (IllegalAccessException iaex) {
// The Field | is |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/record/AbstractLegacyRecordBatchTest.java | {
"start": 1413,
"end": 11971
} | class ____ {
@Test
public void testSetLastOffsetCompressed() {
SimpleRecord[] simpleRecords = new SimpleRecord[] {
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes())
};
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, 0L,
Compression.gzip().build(), TimestampType.CREATE_TIME, simpleRecords);
long lastOffset = 500L;
long firstOffset = lastOffset - simpleRecords.length + 1;
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
batch.setLastOffset(lastOffset);
assertEquals(lastOffset, batch.lastOffset());
assertEquals(firstOffset, batch.baseOffset());
assertTrue(batch.isValid());
List<MutableRecordBatch> recordBatches = Utils.toList(records.batches().iterator());
assertEquals(1, recordBatches.size());
assertEquals(lastOffset, recordBatches.get(0).lastOffset());
long offset = firstOffset;
for (Record record : records.records())
assertEquals(offset++, record.offset());
}
/**
* The wrapper offset should be 0 in v0, but not in v1. However, the latter worked by accident and some versions of
* librdkafka now depend on it. So we support 0 for compatibility reasons, but the recommendation is to set the
* wrapper offset to the relative offset of the last record in the batch.
*/
@Test
public void testIterateCompressedRecordWithWrapperOffsetZero() {
for (byte magic : Arrays.asList(RecordBatch.MAGIC_VALUE_V0, RecordBatch.MAGIC_VALUE_V1)) {
SimpleRecord[] simpleRecords = new SimpleRecord[] {
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes())
};
MemoryRecords records = MemoryRecords.withRecords(magic, 0L,
Compression.gzip().build(), TimestampType.CREATE_TIME, simpleRecords);
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
batch.setLastOffset(0L);
long offset = 0L;
for (Record record : batch)
assertEquals(offset++, record.offset());
}
}
@Test
public void testInvalidWrapperOffsetV1() {
SimpleRecord[] simpleRecords = new SimpleRecord[] {
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes())
};
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, 0L,
Compression.gzip().build(), TimestampType.CREATE_TIME, simpleRecords);
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
batch.setLastOffset(1L);
assertThrows(InvalidRecordException.class, batch::iterator);
}
@Test
public void testSetNoTimestampTypeNotAllowed() {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, 0L,
Compression.gzip().build(), TimestampType.CREATE_TIME,
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
assertThrows(IllegalArgumentException.class, () -> batch.setMaxTimestamp(TimestampType.NO_TIMESTAMP_TYPE, RecordBatch.NO_TIMESTAMP));
}
@Test
public void testSetLogAppendTimeNotAllowedV0() {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V0, 0L,
Compression.gzip().build(), TimestampType.CREATE_TIME,
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
long logAppendTime = 15L;
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
assertThrows(UnsupportedOperationException.class, () -> batch.setMaxTimestamp(TimestampType.LOG_APPEND_TIME, logAppendTime));
}
@Test
public void testSetCreateTimeNotAllowedV0() {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V0, 0L,
Compression.gzip().build(), TimestampType.CREATE_TIME,
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
long createTime = 15L;
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
assertThrows(UnsupportedOperationException.class, () -> batch.setMaxTimestamp(TimestampType.CREATE_TIME, createTime));
}
@Test
public void testSetPartitionLeaderEpochNotAllowedV0() {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V0, 0L,
Compression.gzip().build(), TimestampType.CREATE_TIME,
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
assertThrows(UnsupportedOperationException.class, () -> batch.setPartitionLeaderEpoch(15));
}
@Test
public void testSetPartitionLeaderEpochNotAllowedV1() {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, 0L,
Compression.gzip().build(), TimestampType.CREATE_TIME,
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
assertThrows(UnsupportedOperationException.class, () -> batch.setPartitionLeaderEpoch(15));
}
@Test
public void testSetLogAppendTimeV1() {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, 0L,
Compression.gzip().build(), TimestampType.CREATE_TIME,
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
long logAppendTime = 15L;
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
batch.setMaxTimestamp(TimestampType.LOG_APPEND_TIME, logAppendTime);
assertEquals(TimestampType.LOG_APPEND_TIME, batch.timestampType());
assertEquals(logAppendTime, batch.maxTimestamp());
assertTrue(batch.isValid());
List<MutableRecordBatch> recordBatches = Utils.toList(records.batches().iterator());
assertEquals(1, recordBatches.size());
assertEquals(TimestampType.LOG_APPEND_TIME, recordBatches.get(0).timestampType());
assertEquals(logAppendTime, recordBatches.get(0).maxTimestamp());
for (Record record : records.records())
assertEquals(logAppendTime, record.timestamp());
}
@Test
public void testSetCreateTimeV1() {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, 0L,
Compression.gzip().build(), TimestampType.CREATE_TIME,
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
long createTime = 15L;
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
batch.setMaxTimestamp(TimestampType.CREATE_TIME, createTime);
assertEquals(TimestampType.CREATE_TIME, batch.timestampType());
assertEquals(createTime, batch.maxTimestamp());
assertTrue(batch.isValid());
List<MutableRecordBatch> recordBatches = Utils.toList(records.batches().iterator());
assertEquals(1, recordBatches.size());
assertEquals(TimestampType.CREATE_TIME, recordBatches.get(0).timestampType());
assertEquals(createTime, recordBatches.get(0).maxTimestamp());
long expectedTimestamp = 1L;
for (Record record : records.records())
assertEquals(expectedTimestamp++, record.timestamp());
}
@Test
public void testZStdCompressionTypeWithV0OrV1() {
SimpleRecord[] simpleRecords = new SimpleRecord[] {
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes())
};
// Check V0
try {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V0, 0L,
Compression.zstd().build(), TimestampType.CREATE_TIME, simpleRecords);
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
batch.setLastOffset(1L);
batch.iterator();
fail("Can't reach here");
} catch (IllegalArgumentException e) {
assertEquals("ZStandard compression is not supported for magic 0", e.getMessage());
}
// Check V1
try {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, 0L,
Compression.zstd().build(), TimestampType.CREATE_TIME, simpleRecords);
ByteBufferLegacyRecordBatch batch = new ByteBufferLegacyRecordBatch(records.buffer());
batch.setLastOffset(1L);
batch.iterator();
fail("Can't reach here");
} catch (IllegalArgumentException e) {
assertEquals("ZStandard compression is not supported for magic 1", e.getMessage());
}
}
}
| AbstractLegacyRecordBatchTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/matchers/MatchersTest.java | {
"start": 2287,
"end": 3283
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(InLoopChecker.class, getClass());
@Test
public void methodNameWithParenthesisThrows() {
try {
Matchers.instanceMethod().onExactClass("java.lang.String").named("getBytes()");
fail("Expected an IAE to be throw but wasn't");
} catch (IllegalArgumentException expected) {
}
try {
Matchers.instanceMethod().onExactClass("java.lang.String").named("getBytes)");
fail("Expected an IAE to be throw but wasn't");
} catch (IllegalArgumentException expected) {
}
try {
Matchers.instanceMethod().onExactClass("java.lang.String").named("getBytes(");
fail("Expected an IAE to be throw but wasn't");
} catch (IllegalArgumentException expected) {
}
}
@Test
public void inLoopShouldMatchInWhileLoop() {
compilationHelper
.addSourceLines(
"Test.java",
"""
public | MatchersTest |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/firewall/DefaultHttpFirewall.java | {
"start": 2165,
"end": 4608
} | class ____ implements HttpFirewall {
private boolean allowUrlEncodedSlash;
@Override
public FirewalledRequest getFirewalledRequest(HttpServletRequest request) throws RequestRejectedException {
FirewalledRequest firewalledRequest = new RequestWrapper(request);
if (!isNormalized(firewalledRequest.getServletPath()) || !isNormalized(firewalledRequest.getPathInfo())) {
throw new RequestRejectedException(
"Un-normalized paths are not supported: " + firewalledRequest.getServletPath()
+ ((firewalledRequest.getPathInfo() != null) ? firewalledRequest.getPathInfo() : ""));
}
String requestURI = firewalledRequest.getRequestURI();
if (containsInvalidUrlEncodedSlash(requestURI)) {
throw new RequestRejectedException("The requestURI cannot contain encoded slash. Got " + requestURI);
}
return firewalledRequest;
}
@Override
public HttpServletResponse getFirewalledResponse(HttpServletResponse response) {
return new FirewalledResponse(response);
}
/**
* <p>
* Sets if the application should allow a URL encoded slash character.
* </p>
* <p>
* If true (default is false), a URL encoded slash will be allowed in the URL.
* Allowing encoded slashes can cause security vulnerabilities in some situations
* depending on how the container constructs the HttpServletRequest.
* </p>
* @param allowUrlEncodedSlash the new value (default false)
*/
public void setAllowUrlEncodedSlash(boolean allowUrlEncodedSlash) {
this.allowUrlEncodedSlash = allowUrlEncodedSlash;
}
private boolean containsInvalidUrlEncodedSlash(String uri) {
if (this.allowUrlEncodedSlash || uri == null) {
return false;
}
return uri.contains("%2f") || uri.contains("%2F");
}
/**
* Checks whether a path is normalized (doesn't contain path traversal sequences like
* "./", "/../" or "/.")
* @param path the path to test
* @return true if the path doesn't contain any path-traversal character sequences.
*/
private boolean isNormalized(String path) {
if (path == null) {
return true;
}
for (int i = path.length(); i > 0;) {
int slashIndex = path.lastIndexOf('/', i - 1);
int gap = i - slashIndex;
if (gap == 2 && path.charAt(slashIndex + 1) == '.') {
// ".", "/./" or "/."
return false;
}
if (gap == 3 && path.charAt(slashIndex + 1) == '.' && path.charAt(slashIndex + 2) == '.') {
return false;
}
i = slashIndex;
}
return true;
}
}
| DefaultHttpFirewall |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/inject/NamedRecordTest.java | {
"start": 490,
"end": 1229
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot(root -> root
.addClasses(Beans.class, ListProducer.class)
.addAsResource(
new StringAsset(
"{#each cdi:beans.names}{it}::{/each}"),
"templates/foo.html"));
@Inject
Engine engine;
@Test
public void testResult() {
assertEquals("Jachym::Vojtech::Ondrej::", engine.getTemplate("foo").render());
}
// @Singleton is added automatically
@Named
public record Beans(List<String> names) {
}
@Singleton
public static | NamedRecordTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsDoubleEvaluator.java | {
"start": 3978,
"end": 4585
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory fieldVal;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory fieldVal) {
this.source = source;
this.fieldVal = fieldVal;
}
@Override
public AbsDoubleEvaluator get(DriverContext context) {
return new AbsDoubleEvaluator(source, fieldVal.get(context), context);
}
@Override
public String toString() {
return "AbsDoubleEvaluator[" + "fieldVal=" + fieldVal + "]";
}
}
}
| Factory |
java | spring-projects__spring-framework | spring-beans/src/testFixtures/java/org/springframework/beans/testfixture/beans/factory/aot/InnerBeanConfiguration.java | {
"start": 923,
"end": 1021
} | class ____ {
public SimpleBean anotherBean() {
return new SimpleBean();
}
}
}
}
| Another |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/common/CartesianBoundingBox.java | {
"start": 862,
"end": 2498
} | class ____ extends BoundingBox<CartesianPoint> {
public static final ParseField X_FIELD = new ParseField("x");
public static final ParseField Y_FIELD = new ParseField("y");
public CartesianBoundingBox(CartesianPoint topLeft, CartesianPoint bottomRight) {
super(topLeft, bottomRight);
}
public CartesianBoundingBox(StreamInput input) throws IOException {
super(new CartesianPoint(input.readDouble(), input.readDouble()), new CartesianPoint(input.readDouble(), input.readDouble()));
}
@Override
public XContentBuilder toXContentFragment(XContentBuilder builder) throws IOException {
builder.startObject(TOP_LEFT_FIELD.getPreferredName());
builder.field(X_FIELD.getPreferredName(), topLeft.getX());
builder.field(Y_FIELD.getPreferredName(), topLeft.getY());
builder.endObject();
builder.startObject(BOTTOM_RIGHT_FIELD.getPreferredName());
builder.field(X_FIELD.getPreferredName(), bottomRight.getX());
builder.field(Y_FIELD.getPreferredName(), bottomRight.getY());
builder.endObject();
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeDouble(topLeft.getX());
out.writeDouble(topLeft.getY());
out.writeDouble(bottomRight.getX());
out.writeDouble(bottomRight.getY());
}
@Override
public final String getWriteableName() {
return "CartesianBoundingBox";
}
@Override
public final TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_8_11_X;
}
}
| CartesianBoundingBox |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/EmbeddedColumnNaming.java | {
"start": 990,
"end": 1232
} | class ____ {
* ...
* @Embedded
* @EmbeddedColumnNaming("home_%s")
* Address homeAddress;
* @Embedded
* @EmbeddedColumnNaming("work_%s")
* Address workAddress;
* }
*
* @Embeddable
* | Person |
java | spring-projects__spring-security | test/src/main/java/org/springframework/security/test/context/support/WithSecurityContextTestExecutionListener.java | {
"start": 8657,
"end": 9234
} | class ____ {
private final Supplier<SecurityContext> securityContextSupplier;
private final TestExecutionEvent testExecutionEvent;
TestSecurityContext(Supplier<SecurityContext> securityContextSupplier, TestExecutionEvent testExecutionEvent) {
this.securityContextSupplier = securityContextSupplier;
this.testExecutionEvent = testExecutionEvent;
}
Supplier<SecurityContext> getSecurityContextSupplier() {
return this.securityContextSupplier;
}
TestExecutionEvent getTestExecutionEvent() {
return this.testExecutionEvent;
}
}
}
| TestSecurityContext |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-jackson/deployment/src/test/java/io/quarkus/resteasy/reactive/jackson/deployment/test/FroMageEndpoint.java | {
"start": 240,
"end": 436
} | class ____ {
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@PUT
public FroMage putJson(FroMage fromage) {
return fromage;
}
}
| FroMageEndpoint |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java | {
"start": 2009,
"end": 5498
} | class ____ implements Entry<Text, Text> {
Text key;
Text val;
public Text getKey() {
return key;
}
public Text getValue() {
return val;
}
public Text setValue(Text val) {
this.val = val;
return val;
}
public MyEntry(Text key, Text val) {
this.key = key;
this.val = val;
}
}
/**
*
* @param type the aggregation type
* @param id the aggregation id
* @param val the val associated with the id to be aggregated
* @return an Entry whose key is the aggregation id prefixed with
* the aggregation type.
*/
public static Entry<Text, Text> generateEntry(String type,
String id, Text val) {
Text key = new Text(type + TYPE_SEPARATOR + id);
return new MyEntry(key, val);
}
/**
*
* @param type the aggregation type
* @param uniqCount the limit in the number of unique values to keep,
* if type is UNIQ_VALUE_COUNT
* @return a value aggregator of the given type.
*/
static public ValueAggregator generateValueAggregator(String type, long uniqCount) {
if (type.compareToIgnoreCase(LONG_VALUE_SUM) == 0) {
return new LongValueSum();
} if (type.compareToIgnoreCase(LONG_VALUE_MAX) == 0) {
return new LongValueMax();
} else if (type.compareToIgnoreCase(LONG_VALUE_MIN) == 0) {
return new LongValueMin();
} else if (type.compareToIgnoreCase(STRING_VALUE_MAX) == 0) {
return new StringValueMax();
} else if (type.compareToIgnoreCase(STRING_VALUE_MIN) == 0) {
return new StringValueMin();
} else if (type.compareToIgnoreCase(DOUBLE_VALUE_SUM) == 0) {
return new DoubleValueSum();
} else if (type.compareToIgnoreCase(UNIQ_VALUE_COUNT) == 0) {
return new UniqValueCount(uniqCount);
} else if (type.compareToIgnoreCase(VALUE_HISTOGRAM) == 0) {
return new ValueHistogram();
}
return null;
}
/**
* Generate 1 or 2 aggregation-id/value pairs for the given key/value pair.
* The first id will be of type LONG_VALUE_SUM, with "record_count" as
* its aggregation id. If the input is a file split,
* the second id of the same type will be generated too, with the file name
* as its aggregation id. This achieves the behavior of counting the total
* number of records in the input data, and the number of records
* in each input file.
*
* @param key
* input key
* @param val
* input value
* @return a list of aggregation id/value pairs. An aggregation id encodes an
* aggregation type which is used to guide the way to aggregate the
* value in the reduce/combiner phrase of an Aggregate based job.
*/
public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key,
Object val) {
ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
String countType = LONG_VALUE_SUM;
String id = "record_count";
Entry<Text, Text> e = generateEntry(countType, id, ONE);
if (e != null) {
retv.add(e);
}
if (this.inputFile != null) {
e = generateEntry(countType, this.inputFile, ONE);
if (e != null) {
retv.add(e);
}
}
return retv;
}
/**
* get the input file name.
*
* @param conf a configuration object
*/
public void configure(Configuration conf) {
this.inputFile = conf.get(MRJobConfig.MAP_INPUT_FILE);
}
}
| MyEntry |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/target/PrototypeTargetSource.java | {
"start": 1166,
"end": 1753
} | class ____ extends AbstractPrototypeBasedTargetSource {
/**
* Obtain a new prototype instance for every call.
* @see #newPrototypeInstance()
*/
@Override
public Object getTarget() throws BeansException {
return newPrototypeInstance();
}
/**
* Destroy the given independent instance.
* @see #destroyPrototypeInstance
*/
@Override
public void releaseTarget(Object target) {
destroyPrototypeInstance(target);
}
@Override
public String toString() {
return "PrototypeTargetSource for target bean with name '" + this.targetBeanName + "'";
}
}
| PrototypeTargetSource |
java | apache__rocketmq | remoting/src/test/java/org/apache/rocketmq/remoting/protocol/body/KVTableTest.java | {
"start": 1039,
"end": 1724
} | class ____ {
@Test
public void testFromJson() throws Exception {
HashMap<String, String> table = new HashMap<>();
table.put("key1", "value1");
table.put("key2", "value2");
KVTable kvTable = new KVTable();
kvTable.setTable(table);
String json = RemotingSerializable.toJson(kvTable, true);
KVTable fromJson = RemotingSerializable.fromJson(json, KVTable.class);
assertThat(fromJson).isNotEqualTo(kvTable);
assertThat(fromJson.getTable().get("key1")).isEqualTo(kvTable.getTable().get("key1"));
assertThat(fromJson.getTable().get("key2")).isEqualTo(kvTable.getTable().get("key2"));
}
}
| KVTableTest |
java | apache__camel | components/camel-telemetry-dev/src/main/java/org/apache/camel/telemetrydev/DevTraceFormat.java | {
"start": 1254,
"end": 1363
} | interface ____ {
String format(DevTrace trace);
}
/*
* Output regular Java toString().
*/
| DevTraceFormat |
java | micronaut-projects__micronaut-core | http-server/src/main/java/io/micronaut/http/server/util/locale/HttpAbstractLocaleResolver.java | {
"start": 777,
"end": 950
} | class ____ implements {@link io.micronaut.core.util.LocaleResolver} and handles default locale resolution.
*
* @author Sergio del Amo
* @since 2.3.0
*/
public abstract | which |
java | apache__camel | components/camel-opentelemetry2/src/test/java/org/apache/camel/opentelemetry2/AsyncDirectTest.java | {
"start": 1576,
"end": 8938
} | class ____ extends OpenTelemetryTracerTestSupport {
@Override
protected CamelContext createCamelContext() throws Exception {
OpenTelemetryTracer tst = new OpenTelemetryTracer();
tst.setTracer(otelExtension.getOpenTelemetry().getTracer("traceTest"));
tst.setContextPropagators(otelExtension.getOpenTelemetry().getPropagators());
CamelContext context = super.createCamelContext();
CamelContextAware.trySetCamelContext(tst, context);
tst.init(context);
return context;
}
@Test
void testRouteMultipleRequests() throws InterruptedException, IOException {
int j = 10;
MockEndpoint mock = getMockEndpoint("mock:end");
mock.expectedMessageCount(j);
mock.setAssertPeriod(5000);
for (int i = 0; i < j; i++) {
context.createProducerTemplate().sendBody("direct:start", "Hello!");
}
mock.assertIsSatisfied(1000);
Map<String, OtelTrace> traces = otelExtension.getTraces();
// Each trace should have a unique trace id. It is enough to assert that
// the number of elements in the map is the same of the requests to prove
// all traces have been generated uniquely.
assertEquals(j, traces.size());
// Each trace should have the same structure
for (OtelTrace trace : traces.values()) {
checkTrace(trace, "Hello!");
}
}
private void checkTrace(OtelTrace trace, String expectedBody) {
List<SpanData> spans = trace.getSpans();
assertEquals(7, spans.size());
SpanData testProducer = OpenTelemetryTracerTestSupport.getSpan(spans, "direct://start", Op.EVENT_SENT);
SpanData direct = OpenTelemetryTracerTestSupport.getSpan(spans, "direct://start", Op.EVENT_RECEIVED);
SpanData newDirectTo = OpenTelemetryTracerTestSupport.getSpan(spans, "direct://new", Op.EVENT_SENT);
SpanData log = OpenTelemetryTracerTestSupport.getSpan(spans, "log://info", Op.EVENT_SENT);
SpanData newDirectFrom = OpenTelemetryTracerTestSupport.getSpan(spans, "direct://new", Op.EVENT_RECEIVED);
SpanData newLog = OpenTelemetryTracerTestSupport.getSpan(spans, "log://new", Op.EVENT_SENT);
SpanData newMock = OpenTelemetryTracerTestSupport.getSpan(spans, "mock://end", Op.EVENT_SENT);
// Validate span completion
assertTrue(testProducer.hasEnded());
assertTrue(direct.hasEnded());
assertTrue(newDirectTo.hasEnded());
assertTrue(log.hasEnded());
assertTrue(newDirectFrom.hasEnded());
assertTrue(newLog.hasEnded());
assertTrue(newMock.hasEnded());
// Validate same trace
assertEquals(testProducer.getSpanContext().getTraceId(), direct.getSpanContext().getTraceId());
assertEquals(testProducer.getSpanContext().getTraceId(), newDirectTo.getSpanContext().getTraceId());
assertEquals(testProducer.getSpanContext().getTraceId(), log.getSpanContext().getTraceId());
assertEquals(testProducer.getSpanContext().getTraceId(), newDirectFrom.getSpanContext().getTraceId());
assertEquals(testProducer.getSpanContext().getTraceId(), newLog.getSpanContext().getTraceId());
assertEquals(testProducer.getSpanContext().getTraceId(), newMock.getSpanContext().getTraceId());
// Validate same Exchange ID
// As it's a "direct" component, we expect the logic to happen within the same
// Exchange boundary
assertEquals(testProducer.getAttributes().get(AttributeKey.stringKey("exchangeId")),
direct.getAttributes().get(AttributeKey.stringKey("exchangeId")));
assertEquals(testProducer.getAttributes().get(AttributeKey.stringKey("exchangeId")),
newDirectTo.getAttributes().get(AttributeKey.stringKey("exchangeId")));
assertEquals(testProducer.getAttributes().get(AttributeKey.stringKey("exchangeId")),
newDirectFrom.getAttributes().get(AttributeKey.stringKey("exchangeId")));
assertEquals(testProducer.getAttributes().get(AttributeKey.stringKey("exchangeId")),
log.getAttributes().get(AttributeKey.stringKey("exchangeId")));
assertEquals(testProducer.getAttributes().get(AttributeKey.stringKey("exchangeId")),
newLog.getAttributes().get(AttributeKey.stringKey("exchangeId")));
assertEquals(testProducer.getAttributes().get(AttributeKey.stringKey("exchangeId")),
newMock.getAttributes().get(AttributeKey.stringKey("exchangeId")));
// Validate hierarchy
assertFalse(testProducer.getParentSpanContext().isValid());
assertEquals(testProducer.getSpanContext().getSpanId(), direct.getParentSpanContext().getSpanId());
assertEquals(direct.getSpanContext().getSpanId(), newDirectTo.getParentSpanContext().getSpanId());
assertEquals(direct.getSpanContext().getSpanId(), log.getParentSpanContext().getSpanId());
assertEquals(newDirectTo.getSpanContext().getSpanId(), newDirectFrom.getParentSpanContext().getSpanId());
assertEquals(newDirectFrom.getSpanContext().getSpanId(), newLog.getParentSpanContext().getSpanId());
assertEquals(newDirectFrom.getSpanContext().getSpanId(), newMock.getParentSpanContext().getSpanId());
// Validate message logging
assertEquals("A direct message", direct.getEvents().get(0).getAttributes().get(
AttributeKey.stringKey("message")));
assertEquals("A new message", newDirectFrom.getEvents().get(0).getAttributes().get(
AttributeKey.stringKey("message")));
if (expectedBody == null) {
assertEquals(
"Exchange[ExchangePattern: InOut, BodyType: null, Body: [Body is null]]",
log.getEvents().get(0).getAttributes().get(
AttributeKey.stringKey("message")));
assertEquals(
"Exchange[ExchangePattern: InOut, BodyType: null, Body: [Body is null]]",
newLog.getEvents().get(0).getAttributes().get(
AttributeKey.stringKey("message")));
} else {
assertEquals(
"Exchange[ExchangePattern: InOnly, BodyType: String, Body: " + expectedBody + "]",
log.getEvents().get(0).getAttributes().get(
AttributeKey.stringKey("message")));
assertEquals(
"Exchange[ExchangePattern: InOnly, BodyType: String, Body: " + expectedBody + "]",
newLog.getEvents().get(0).getAttributes().get(
AttributeKey.stringKey("message")));
}
}
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.routeId("start")
.to("direct:new")
.log("A direct message")
.to("log:info");
from("direct:new")
.delay(2000)
.routeId("new")
.log("A new message")
.to("log:new")
.to("mock:end");
}
};
}
}
| AsyncDirectTest |
java | quarkusio__quarkus | integration-tests/gradle/src/test/java/io/quarkus/gradle/KotlinIsIncludedInQuarkusJarTest.java | {
"start": 329,
"end": 1812
} | class ____ extends QuarkusGradleWrapperTestBase {
@Test
public void testFastJarFormatWorks() throws Exception {
final File projectDir = getProjectDir("basic-kotlin-application-project");
runGradleWrapper(projectDir, "clean", "build");
final Path quarkusApp = projectDir.toPath().resolve("build").resolve("quarkus-app").resolve("app");
assertThat(quarkusApp).exists();
Path jar = quarkusApp.resolve("code-with-quarkus-unspecified.jar");
assertThat(jar).exists();
try (JarFile jarFile = new JarFile(jar.toFile())) {
assertJarContainsEntry(jarFile, "basic-kotlin-application-project/src/main/kotlin/org/acme/MyMainClass.class");
assertJarContainsEntry(jarFile, "org/acme/GreetingResource.class");
assertJarContainsEntry(jarFile, "META-INF/code-with-quarkus.kotlin_module");
}
}
private void assertJarContainsEntry(JarFile jarFile, String expectedEntry) {
boolean entryFound = false;
Enumeration<JarEntry> entries = jarFile.entries();
while (entries.hasMoreElements()) {
JarEntry entry = entries.nextElement();
System.out.println(entry.getName());
if (entry.getName().equals(expectedEntry)) {
entryFound = true;
break;
}
}
assertTrue(entryFound, "Expected entry " + expectedEntry + " not found in JAR file.");
}
}
| KotlinIsIncludedInQuarkusJarTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java | {
"start": 1550,
"end": 7415
} | class ____ extends ESTestCase {
private static final String MODULE_NAME = "synthetic";
private static Set<URLClassLoader> loaders = new HashSet<>();
/**
* Test the loadClass method, which is the real entrypoint for users of the classloader
*/
public void testLoadFromJar() throws Exception {
Path topLevelDir = createTempDir(getTestName());
Path jar = topLevelDir.resolve("my-jar.jar");
createMinimalJar(jar, "p.MyClass");
try (UberModuleClassLoader loader = getLoader(jar)) {
{
Class<?> c = loader.loadClass("p.MyClass");
assertThat(c, notNullValue());
Object instance = c.getConstructor().newInstance();
assertThat(instance.toString(), equalTo("MyClass"));
assertThat(c.getModule().getName(), equalTo(MODULE_NAME));
}
{
ClassNotFoundException e = expectThrows(ClassNotFoundException.class, () -> loader.loadClass("p.DoesNotExist"));
assertThat(e.getMessage(), equalTo("p.DoesNotExist"));
}
}
}
/**
* Test the findClass method, which we overrode but which will not be called by
* users of the classloader
*/
public void testSingleJarFindClass() throws Exception {
Path topLevelDir = createTempDir(getTestName());
Path jar = topLevelDir.resolve("my-jar-with-resources.jar");
createMinimalJar(jar, "p.MyClass");
{
try (UberModuleClassLoader loader = getLoader(jar)) {
Class<?> c = loader.findClass("p.MyClass");
assertThat(c, notNullValue());
c = loader.findClass("p.DoesNotExist");
assertThat(c, nullValue());
}
}
{
try (UberModuleClassLoader loader = getLoader(jar)) {
Class<?> c = loader.findClass(MODULE_NAME, "p.MyClass");
assertThat(c, notNullValue());
c = loader.findClass(MODULE_NAME, "p.DoesNotExist");
assertThat(c, nullValue());
c = loader.findClass("does-not-exist", "p.MyClass");
assertThat(c, nullValue());
c = loader.findClass(null, "p.MyClass");
assertThat(c, nullValue());
}
}
}
public void testSingleJarFindResources() throws Exception {
Path topLevelDir = createTempDir(getTestName());
Path jar = topLevelDir.resolve("my-jar-with-resources.jar");
Map<String, CharSequence> sources = new HashMap<>();
sources.put("p." + "MyClass", getMinimalSourceString("p", "MyClass", "MyClass"));
var classToBytes = InMemoryJavaCompiler.compile(sources);
Map<String, byte[]> jarEntries = new HashMap<>();
jarEntries.put("p/" + "MyClass" + ".class", classToBytes.get("p." + "MyClass"));
jarEntries.put("META-INF/resource.txt", "my resource".getBytes(StandardCharsets.UTF_8));
JarUtils.createJarWithEntries(jar, jarEntries);
try (UberModuleClassLoader loader = getLoader(jar)) {
{
URL location = loader.findResource("p/MyClass.class");
assertThat(location, notNullValue());
location = loader.findResource("p/DoesNotExist.class");
assertThat(location, nullValue());
location = loader.findResource("META-INF/resource.txt");
assertThat(location, notNullValue());
location = loader.findResource("META-INF/does_not_exist.txt");
assertThat(location, nullValue());
}
{
URL location = loader.findResource(MODULE_NAME, "p/MyClass.class");
assertThat(location, notNullValue());
location = loader.findResource(MODULE_NAME, "p/DoesNotExist.class");
assertThat(location, nullValue());
location = loader.findResource("does-not-exist", "p/MyClass.class");
assertThat(location, nullValue());
location = loader.findResource(null, "p/MyClass.class");
assertThat(location, nullValue());
}
{
Enumeration<URL> locations = loader.findResources("p/MyClass.class");
assertTrue(locations.hasMoreElements());
locations = loader.findResources("p/DoesNotExist.class");
assertFalse(locations.hasMoreElements());
locations = loader.findResources("META-INF/resource.txt");
assertTrue(locations.hasMoreElements());
locations = loader.findResources("META-INF/does_not_exist.txt");
assertFalse(locations.hasMoreElements());
}
}
}
public void testHideSplitPackageInParentClassloader() throws Exception {
Path tempDir = createTempDir(getTestName());
Path overlappingJar = tempDir.resolve("my-split-package.jar");
createTwoClassJar(overlappingJar, "ParentJarClassInPackageP");
Path jar = tempDir.resolve("my-jar.jar");
createMinimalJar(jar, "p.MyClassInPackageP");
URL[] urls = new URL[] { toUrl(overlappingJar) };
try (
URLClassLoader parent = URLClassLoader.newInstance(urls, UberModuleClassLoaderTests.class.getClassLoader());
UberModuleClassLoader loader = UberModuleClassLoader.getInstance(parent, MODULE_NAME, Set.of(toUrl(jar)))
) {
// stable plugin loader gives us the good class...
Class<?> c = loader.loadClass("p.MyClassInPackageP");
Object instance = c.getConstructor().newInstance();
assertThat(instance.toString(), equalTo("MyClassInPackageP"));
// but stable plugin loader can't find the | UberModuleClassLoaderTests |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/builder/nestedprop/expanding/ImmutableArticle.java | {
"start": 216,
"end": 591
} | class ____ {
private final String description;
ImmutableArticle(ImmutableArticle.Builder builder) {
this.description = builder.description;
}
public static ImmutableArticle.Builder builder() {
return new ImmutableArticle.Builder();
}
public String getDescription() {
return description;
}
public static | ImmutableArticle |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/file/FileRenameReadLockMustUseMarkerFileTest.java | {
"start": 1182,
"end": 3044
} | class ____ extends ContextTestSupport {
@Test
public void testCamelLockFile() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.expectedBodiesReceived("Bye World");
mock.message(0).header(Exchange.FILE_NAME).isEqualTo("bye.txt");
template.sendBodyAndHeader(fileUri(), "Bye World", Exchange.FILE_NAME, "bye.txt");
// start the route
context.getRouteController().startRoute("foo");
assertMockEndpointsSatisfied();
assertTrue(oneExchangeDone.matchesWaitTime());
// and lock file should be deleted
assertFileNotExists(testFile("bye.txt" + FileComponent.DEFAULT_LOCK_FILE_POSTFIX));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(fileUri("?readLock=rename&initialDelay=0&delay=10")).routeId("foo").autoStartup(false)
.process(new Processor() {
@Override
public void process(Exchange exchange) {
// got a file, so we should have a .camelLock file as
// well
String name = exchange.getIn().getHeader(Exchange.FILE_PATH)
+ FileComponent.DEFAULT_LOCK_FILE_POSTFIX;
File lock = new File(name);
// lock file should exist
assertTrue(lock.exists(), "Lock file should exist: " + name);
}
}).convertBodyTo(String.class).to("mock:result");
}
};
}
}
| FileRenameReadLockMustUseMarkerFileTest |
java | spring-projects__spring-framework | spring-core-test/src/test/java/org/springframework/aot/agent/InstrumentedMethodTests.java | {
"start": 20169,
"end": 21769
} | class ____ {
RecordedInvocation newProxyInstance;
@BeforeEach
void setup() {
this.newProxyInstance = RecordedInvocation.of(InstrumentedMethod.PROXY_NEWPROXYINSTANCE)
.withArguments(ClassLoader.getSystemClassLoader(), new Class[] { AutoCloseable.class, Comparator.class }, null)
.returnValue(Proxy.newProxyInstance(ClassLoader.getSystemClassLoader(), new Class[] { AutoCloseable.class, Comparator.class }, (proxy, method, args) -> null))
.build();
}
@Test
void proxyNewProxyInstanceShouldMatchWhenInterfacesMatch() {
hints.proxies().registerJdkProxy(AutoCloseable.class, Comparator.class);
assertThatInvocationMatches(InstrumentedMethod.PROXY_NEWPROXYINSTANCE, this.newProxyInstance);
}
@Test
void proxyNewProxyInstanceShouldNotMatchWhenInterfacesDoNotMatch() {
hints.proxies().registerJdkProxy(Comparator.class);
assertThatInvocationDoesNotMatch(InstrumentedMethod.PROXY_NEWPROXYINSTANCE, this.newProxyInstance);
}
@Test
void proxyNewProxyInstanceShouldNotMatchWhenWrongOrder() {
hints.proxies().registerJdkProxy(Comparator.class, AutoCloseable.class);
assertThatInvocationDoesNotMatch(InstrumentedMethod.PROXY_NEWPROXYINSTANCE, this.newProxyInstance);
}
}
private void assertThatInvocationMatches(InstrumentedMethod method, RecordedInvocation invocation) {
assertThat(method.matcher(invocation)).accepts(this.hints);
}
private void assertThatInvocationDoesNotMatch(InstrumentedMethod method, RecordedInvocation invocation) {
assertThat(method.matcher(invocation)).rejects(this.hints);
}
static | ProxiesInstrumentationTests |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java | {
"start": 1926,
"end": 8994
} | class ____ extends CompositeService {
private static final Logger LOG =
LoggerFactory.getLogger(TimelineCollectorManager.class);
private TimelineWriter writer;
private ScheduledExecutorService writerFlusher;
private int flushInterval;
private boolean writerFlusherRunning;
@Override
protected void serviceInit(Configuration conf) throws Exception {
writer = createTimelineWriter(conf);
writer.init(conf);
// create a single dedicated thread for flushing the writer on a periodic
// basis
writerFlusher = Executors.newSingleThreadScheduledExecutor();
flushInterval = conf.getInt(
YarnConfiguration.
TIMELINE_SERVICE_WRITER_FLUSH_INTERVAL_SECONDS,
YarnConfiguration.
DEFAULT_TIMELINE_SERVICE_WRITER_FLUSH_INTERVAL_SECONDS);
super.serviceInit(conf);
}
private TimelineWriter createTimelineWriter(final Configuration conf) {
String timelineWriterClassName = conf.get(
YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WRITER_CLASS);
LOG.info("Using TimelineWriter: {}", timelineWriterClassName);
try {
Class<?> timelineWriterClazz = Class.forName(timelineWriterClassName);
if (TimelineWriter.class.isAssignableFrom(timelineWriterClazz)) {
return (TimelineWriter) ReflectionUtils.newInstance(
timelineWriterClazz, conf);
} else {
throw new YarnRuntimeException("Class: " + timelineWriterClassName
+ " not instance of " + TimelineWriter.class.getCanonicalName());
}
} catch (ClassNotFoundException e) {
throw new YarnRuntimeException("Could not instantiate TimelineWriter: "
+ timelineWriterClassName, e);
}
}
@Override
protected void serviceStart() throws Exception {
super.serviceStart();
if (writer != null) {
writer.start();
}
// schedule the flush task
writerFlusher.scheduleAtFixedRate(new WriterFlushTask(writer),
flushInterval, flushInterval, TimeUnit.SECONDS);
writerFlusherRunning = true;
}
// access to this map is synchronized with the map itself
private final Map<ApplicationId, TimelineCollector> collectors =
Collections.synchronizedMap(
new HashMap<ApplicationId, TimelineCollector>());
public TimelineCollectorManager(String name) {
super(name);
}
protected TimelineWriter getWriter() {
return writer;
}
/**
* Put the collector into the collection if an collector mapped by id does
* not exist.
*
* @param appId Application Id for which collector needs to be put.
* @param collector timeline collector to be put.
* @throws YarnRuntimeException if there was any exception in initializing
* and starting the app level service
* @return the collector associated with id after the potential put.
*/
public TimelineCollector putIfAbsent(ApplicationId appId,
TimelineCollector collector) {
TimelineCollector collectorInTable = null;
synchronized (collectors) {
collectorInTable = collectors.get(appId);
if (collectorInTable == null) {
try {
// initialize, start, and add it to the collection so it can be
// cleaned up when the parent shuts down
collector.init(getConfig());
collector.setWriter(writer);
collector.start();
collectors.put(appId, collector);
LOG.info("the collector for {} was added", appId);
collectorInTable = collector;
postPut(appId, collectorInTable);
} catch (Exception e) {
throw new YarnRuntimeException(e);
}
} else {
LOG.info("the collector for {} already exists!", appId);
}
}
return collectorInTable;
}
/**
* Callback handler for the timeline collector manager when a collector has
* been added into the collector map.
* @param appId Application id of the collector.
* @param collector The actual timeline collector that has been added.
*/
public void postPut(ApplicationId appId, TimelineCollector collector) {
doPostPut(appId, collector);
collector.setReadyToAggregate();
}
/**
* A template method that will be called by
* {@link #postPut(ApplicationId, TimelineCollector)}.
* @param appId Application id of the collector.
* @param collector The actual timeline collector that has been added.
*/
protected void doPostPut(ApplicationId appId, TimelineCollector collector) {
}
/**
* Removes the collector for the specified id. The collector is also stopped
* as a result. If the collector does not exist, no change is made.
*
* @param appId Application Id to remove.
* @return whether it was removed successfully
*/
public boolean remove(ApplicationId appId) {
TimelineCollector collector = collectors.remove(appId);
if (collector == null) {
LOG.error("the collector for {} does not exist!", appId);
} else {
synchronized (collector) {
postRemove(appId, collector);
// stop the service to do clean up
collector.stop();
}
LOG.info("The collector service for {} was removed", appId);
}
return collector != null;
}
protected void postRemove(ApplicationId appId, TimelineCollector collector) {
}
/**
* Returns the collector for the specified id.
*
* @param appId Application Id for which we need to get the collector.
* @return the collector or null if it does not exist
*/
public TimelineCollector get(ApplicationId appId) {
return collectors.get(appId);
}
/**
* Returns whether the collector for the specified id exists in this
* collection.
* @param appId Application Id.
* @return true if collector for the app id is found, false otherwise.
*/
public boolean containsTimelineCollector(ApplicationId appId) {
return collectors.containsKey(appId);
}
@Override
protected void serviceStop() throws Exception {
if (collectors != null && collectors.size() > 0) {
synchronized (collectors) {
for (TimelineCollector c : collectors.values()) {
c.serviceStop();
}
}
}
// stop the flusher first
if (writerFlusher != null) {
writerFlusher.shutdown();
writerFlusherRunning = false;
if (!writerFlusher.awaitTermination(30, TimeUnit.SECONDS)) {
// in reality it should be ample time for the flusher task to finish
// even if it times out, writers may be able to handle closing in this
// situation fine
// proceed to close the writer
LOG.warn("failed to stop the flusher task in time. " +
"will still proceed to close the writer.");
}
}
if (writer != null) {
writer.close();
}
super.serviceStop();
}
@VisibleForTesting
boolean writerFlusherRunning() {
return writerFlusherRunning;
}
/**
* Task that invokes the flush operation on the timeline writer.
*/
private static | TimelineCollectorManager |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/interceptor/TransactionalClientDataSourceWithOnExceptionRollbackTest.java | {
"start": 1385,
"end": 3783
} | class ____ extends TransactionalClientDataSourceTest {
@Override
@Test
public void testTransactionRollback() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:error");
mock.expectedMessageCount(1);
try {
template.sendBody("direct:fail", "Hello World");
fail("Should have thrown exception");
} catch (RuntimeCamelException e) {
// expected as we fail
assertIsInstanceOf(RuntimeCamelException.class, e.getCause());
RollbackExchangeException rollback = assertIsInstanceOf(RollbackExchangeException.class, e.getCause().getCause());
assertEquals("Donkey in Action", rollback.getExchange().getIn().getBody());
}
assertMockEndpointsSatisfied();
int count = jdbc.queryForObject("select count(*) from books", Integer.class);
assertEquals(1, count, "Number of books");
}
@Override
// The API is deprecated, we can remove warnings safely as the tests will disappear when removing this component.
@SuppressWarnings("deprecation")
protected RouteBuilder createRouteBuilder() throws Exception {
return new SpringRouteBuilder() {
public void configure() throws Exception {
// use required as transaction policy
SpringTransactionPolicy required = lookup("PROPAGATION_REQUIRED", SpringTransactionPolicy.class);
// configure to use transaction error handler and pass on the required as it will fetch
// the transaction manager from it that it needs
errorHandler(transactionErrorHandler(required));
// on exception is also supported
onException(IllegalArgumentException.class).handled(false).to("mock:error").rollback();
from("direct:okay")
.policy(required)
.setBody(constant("Tiger in Action")).bean("bookService")
.setBody(constant("Elephant in Action")).bean("bookService");
from("direct:fail")
.policy(required)
.setBody(constant("Tiger in Action")).bean("bookService")
.setBody(constant("Donkey in Action")).bean("bookService");
}
};
}
}
| TransactionalClientDataSourceWithOnExceptionRollbackTest |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/storage/ClusterConfigState.java | {
"start": 1371,
"end": 13539
} | class ____ {
public static final long NO_OFFSET = -1;
public static final ClusterConfigState EMPTY = new ClusterConfigState(
NO_OFFSET,
null,
Map.of(),
Map.of(),
Map.of(),
Map.of(),
Map.of(),
Map.of(),
Map.of(),
Set.of(),
Set.of());
private final long offset;
private final SessionKey sessionKey;
private final WorkerConfigTransformer configTransformer;
final Map<String, Integer> connectorTaskCounts;
final Map<String, Map<String, String>> connectorConfigs;
final Map<String, TargetState> connectorTargetStates;
final Map<ConnectorTaskId, Map<String, String>> taskConfigs;
final Map<String, Integer> connectorTaskCountRecords;
final Map<String, Integer> connectorTaskConfigGenerations;
final Map<String, AppliedConnectorConfig> appliedConnectorConfigs;
final Set<String> connectorsPendingFencing;
final Set<String> inconsistentConnectors;
public ClusterConfigState(long offset,
SessionKey sessionKey,
Map<String, Integer> connectorTaskCounts,
Map<String, Map<String, String>> connectorConfigs,
Map<String, TargetState> connectorTargetStates,
Map<ConnectorTaskId, Map<String, String>> taskConfigs,
Map<String, Integer> connectorTaskCountRecords,
Map<String, Integer> connectorTaskConfigGenerations,
Map<String, AppliedConnectorConfig> appliedConnectorConfigs,
Set<String> connectorsPendingFencing,
Set<String> inconsistentConnectors) {
this(offset,
sessionKey,
connectorTaskCounts,
connectorConfigs,
connectorTargetStates,
taskConfigs,
connectorTaskCountRecords,
connectorTaskConfigGenerations,
appliedConnectorConfigs,
connectorsPendingFencing,
inconsistentConnectors,
null);
}
public ClusterConfigState(long offset,
SessionKey sessionKey,
Map<String, Integer> connectorTaskCounts,
Map<String, Map<String, String>> connectorConfigs,
Map<String, TargetState> connectorTargetStates,
Map<ConnectorTaskId, Map<String, String>> taskConfigs,
Map<String, Integer> connectorTaskCountRecords,
Map<String, Integer> connectorTaskConfigGenerations,
Map<String, AppliedConnectorConfig> appliedConnectorConfigs,
Set<String> connectorsPendingFencing,
Set<String> inconsistentConnectors,
WorkerConfigTransformer configTransformer) {
this.offset = offset;
this.sessionKey = sessionKey;
this.connectorTaskCounts = connectorTaskCounts;
this.connectorConfigs = connectorConfigs;
this.connectorTargetStates = connectorTargetStates;
this.taskConfigs = taskConfigs;
this.connectorTaskCountRecords = connectorTaskCountRecords;
this.connectorTaskConfigGenerations = connectorTaskConfigGenerations;
this.appliedConnectorConfigs = appliedConnectorConfigs;
this.connectorsPendingFencing = connectorsPendingFencing;
this.inconsistentConnectors = inconsistentConnectors;
this.configTransformer = configTransformer;
}
/**
* Get the last offset read to generate this config state. This offset is not guaranteed to be perfectly consistent
* with the recorded state because some partial updates to task configs may have been read.
* @return the latest config offset
*/
public long offset() {
return offset;
}
/**
* Get the latest session key from the config state
* @return the {@link SessionKey session key}; may be null if no key has been read yet
*/
public SessionKey sessionKey() {
return sessionKey;
}
/**
* Check whether this snapshot contains configuration for a connector.
* @param connector name of the connector
* @return true if this state contains configuration for the connector, false otherwise
*/
public boolean contains(String connector) {
return connectorConfigs.containsKey(connector);
}
/**
* Get a list of the connectors in this configuration
*/
public Set<String> connectors() {
return connectorConfigs.keySet();
}
/**
* Get the configuration for a connector. The configuration will have been transformed by
* {@link org.apache.kafka.common.config.ConfigTransformer} by having all variable
* references replaced with the current values from external instances of
* {@link ConfigProvider}, and may include secrets.
* @param connector name of the connector
* @return a map containing configuration parameters
*/
public Map<String, String> connectorConfig(String connector) {
Map<String, String> configs = connectorConfigs.get(connector);
if (configTransformer != null) {
configs = configTransformer.transform(connector, configs);
}
return configs;
}
public Map<String, String> rawConnectorConfig(String connector) {
return connectorConfigs.get(connector);
}
/**
* Get the most recent configuration for the connector from which task configs have
* been generated. The configuration will have been transformed by
* {@link org.apache.kafka.common.config.ConfigTransformer}
* @param connector name of the connector
* @return the connector config, or null if no config exists from which task configs have
* been generated
*/
public Map<String, String> appliedConnectorConfig(String connector) {
AppliedConnectorConfig appliedConfig = appliedConnectorConfigs.get(connector);
return appliedConfig != null ? appliedConfig.transformedConfig(configTransformer) : null;
}
/**
* Get the target state of the connector
* @param connector name of the connector
* @return the target state
*/
public TargetState targetState(String connector) {
return connectorTargetStates.get(connector);
}
/**
* Get the configuration for a task. The configuration will have been transformed by
* {@link org.apache.kafka.common.config.ConfigTransformer} by having all variable
* references replaced with the current values from external instances of
* {@link ConfigProvider}, and may include secrets.
* @param task id of the task
* @return a map containing configuration parameters
*/
public Map<String, String> taskConfig(ConnectorTaskId task) {
Map<String, String> configs = taskConfigs.get(task);
if (configTransformer != null) {
configs = configTransformer.transform(task.connector(), configs);
}
return configs;
}
public Map<String, String> rawTaskConfig(ConnectorTaskId task) {
return taskConfigs.get(task);
}
/**
* Get the number of tasks for a given connector.
* @param connectorName name of the connector to look up tasks for
* @return the number of tasks
*/
public int taskCount(String connectorName) {
Integer count = connectorTaskCounts.get(connectorName);
return count == null ? 0 : count;
}
/**
* Get whether the connector requires a round of zombie fencing before
* a new generation of tasks can be brought up for it.
* @param connectorName name of the connector
*/
public boolean pendingFencing(String connectorName) {
return connectorsPendingFencing.contains(connectorName);
}
/**
* Get the current set of task IDs for the specified connector.
* @param connectorName the name of the connector to look up task configs for
* @return the current set of connector task IDs
*/
public List<ConnectorTaskId> tasks(String connectorName) {
if (inconsistentConnectors.contains(connectorName)) {
return List.of();
}
Integer numTasks = connectorTaskCounts.get(connectorName);
if (numTasks == null) {
return List.of();
}
List<ConnectorTaskId> taskIds = new ArrayList<>(numTasks);
for (int taskIndex = 0; taskIndex < numTasks; taskIndex++) {
ConnectorTaskId taskId = new ConnectorTaskId(connectorName, taskIndex);
taskIds.add(taskId);
}
return List.copyOf(taskIds);
}
/**
* Get the task count record for the connector, if one exists
* @param connector name of the connector
* @return the latest task count record for the connector, or {@code null} if none exists
*/
public Integer taskCountRecord(String connector) {
return connectorTaskCountRecords.get(connector);
}
/**
* Get the generation number for the connector's task configurations, if one exists.
* Generation numbers increase monotonically each time a new set of task configurations is detected for the connector
* @param connector name of the connector
* @return the latest task config generation number for the connector, or {@code null} if none exists
*/
public Integer taskConfigGeneration(String connector) {
return connectorTaskConfigGenerations.get(connector);
}
/**
* Get the set of connectors which have inconsistent data in this snapshot. These inconsistencies can occur due to
* partially completed writes combined with log compaction.
* <p>
* Connectors in this set will appear in the output of {@link #connectors()} since their connector configuration is
* available, but not in the output of {@link #tasks(String)} since the task configs are incomplete.
* <p>
* When a worker detects a connector in this state, it should request that the connector regenerate its task
* configurations.
*
* @return the set of inconsistent connectors
*/
public Set<String> inconsistentConnectors() {
return inconsistentConnectors;
}
@Override
public String toString() {
return "ClusterConfigState{" +
"offset=" + offset +
", sessionKey=" + (sessionKey != null ? "[hidden]" : "null") +
", connectorTaskCounts=" + connectorTaskCounts +
", connectorConfigs=" + connectorConfigs +
", taskConfigs=" + taskConfigs +
", inconsistentConnectors=" + inconsistentConnectors +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ClusterConfigState that = (ClusterConfigState) o;
return offset == that.offset &&
Objects.equals(sessionKey, that.sessionKey) &&
Objects.equals(connectorTaskCounts, that.connectorTaskCounts) &&
Objects.equals(connectorConfigs, that.connectorConfigs) &&
Objects.equals(connectorTargetStates, that.connectorTargetStates) &&
Objects.equals(taskConfigs, that.taskConfigs) &&
Objects.equals(inconsistentConnectors, that.inconsistentConnectors) &&
Objects.equals(configTransformer, that.configTransformer);
}
@Override
public int hashCode() {
return Objects.hash(
offset,
sessionKey,
connectorTaskCounts,
connectorConfigs,
connectorTargetStates,
taskConfigs,
inconsistentConnectors,
configTransformer);
}
}
| ClusterConfigState |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/JobSubmissionResult.java | {
"start": 959,
"end": 2032
} | class ____ {
private final JobID jobID;
public JobSubmissionResult(JobID jobID) {
this.jobID = jobID;
}
/**
* Returns the JobID assigned to the job by the Flink runtime.
*
* @return jobID, or null if the job has been executed on a runtime without JobIDs or if the
* execution failed.
*/
public JobID getJobID() {
return jobID;
}
/**
* Checks if this JobSubmissionResult is also a JobExecutionResult. See {@code
* getJobExecutionResult} to retrieve the JobExecutionResult.
*
* @return True if this is a JobExecutionResult, false otherwise
*/
public boolean isJobExecutionResult() {
return false;
}
/**
* Returns the JobExecutionResult if available.
*
* @return The JobExecutionResult
* @throws ClassCastException if this is not a JobExecutionResult
*/
public JobExecutionResult getJobExecutionResult() {
throw new ClassCastException("This JobSubmissionResult is not a JobExecutionResult.");
}
}
| JobSubmissionResult |
java | quarkusio__quarkus | extensions/keycloak-admin-rest-client/runtime/src/main/java/io/quarkus/keycloak/admin/rest/client/runtime/KeycloakAdminRestClientProvider.java | {
"start": 1235,
"end": 6323
} | class ____ implements ResteasyClientProvider {
private static final List<String> HANDLED_MEDIA_TYPES = List.of(MediaType.APPLICATION_JSON);
private static final int WRITER_PROVIDER_PRIORITY = Priorities.USER + 100; // ensures that it will be used first
private static final int READER_PROVIDER_PRIORITY = Priorities.USER - 100; // ensures that it will be used first
private final boolean tlsTrustAll;
private final TlsConfig tlsConfig;
public KeycloakAdminRestClientProvider(boolean tlsTrustAll) {
this.tlsTrustAll = tlsTrustAll;
this.tlsConfig = null;
}
public KeycloakAdminRestClientProvider(TlsConfiguration tlsConfiguration) {
tlsTrustAll = tlsConfiguration.isTrustAll();
this.tlsConfig = createTlsConfig(tlsConfiguration);
}
@Override
public Client newRestEasyClient(Object messageHandler, SSLContext sslContext, boolean disableTrustManager) {
ClientBuilderImpl clientBuilder = new ClientBuilderImpl();
if (tlsConfig == null) {
clientBuilder.trustAll(tlsTrustAll || disableTrustManager);
} else {
clientBuilder.tlsConfig(tlsConfig);
}
return registerJacksonProviders(clientBuilder).build();
}
// this code is much more complicated than expected because it needs to handle various permutations
// where beans may or may not exist
private ClientBuilderImpl registerJacksonProviders(ClientBuilderImpl clientBuilder) {
ArcContainer arcContainer = Arc.container();
if (arcContainer == null) {
throw new IllegalStateException(this.getClass().getName() + " should only be used in a Quarkus application");
} else {
ObjectMapper newObjectMapper = newKeycloakAdminClientObjectMapper();
clientBuilder = clientBuilder
.registerMessageBodyReader(new JacksonBasicMessageBodyReader(newObjectMapper), Object.class,
HANDLED_MEDIA_TYPES, true,
READER_PROVIDER_PRIORITY)
.registerMessageBodyWriter(new ClientJacksonMessageBodyWriter(newObjectMapper), Object.class,
HANDLED_MEDIA_TYPES, true, WRITER_PROVIDER_PRIORITY);
InstanceHandle<ClientLogger> clientLogger = arcContainer.instance(ClientLogger.class);
if (clientLogger.isAvailable()) {
clientBuilder.clientLogger(clientLogger.get());
}
}
return clientBuilder;
}
// creates new ObjectMapper compatible with Keycloak Admin Client
private ObjectMapper newKeycloakAdminClientObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
// Same like JSONSerialization class. Makes it possible to use admin-client against older versions of Keycloak server where the properties on representations might be different
objectMapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
// The client must work with the newer versions of Keycloak server, which might contain the JSON fields not yet known by the client. So unknown fields will be ignored.
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
return objectMapper;
}
@Override
public <R> R targetProxy(WebTarget target, Class<R> targetClass) {
return ((WebTargetImpl) target).proxy(targetClass);
}
private static TlsConfig createTlsConfig(TlsConfiguration tlsConfiguration) {
return new TlsConfig() {
@Override
public KeyStore getKeyStore() {
return tlsConfiguration.getKeyStore();
}
@Override
public KeyCertOptions getKeyStoreOptions() {
return tlsConfiguration.getKeyStoreOptions();
}
@Override
public KeyStore getTrustStore() {
return tlsConfiguration.getTrustStore();
}
@Override
public TrustOptions getTrustStoreOptions() {
return tlsConfiguration.getTrustStoreOptions();
}
@Override
public SSLOptions getSSLOptions() {
return tlsConfiguration.getSSLOptions();
}
@Override
public SSLContext createSSLContext() throws Exception {
return tlsConfiguration.createSSLContext();
}
@Override
public Optional<String> getHostnameVerificationAlgorithm() {
return tlsConfiguration.getHostnameVerificationAlgorithm();
}
@Override
public boolean usesSni() {
return tlsConfiguration.usesSni();
}
@Override
public boolean isTrustAll() {
return tlsConfiguration.isTrustAll();
}
@Override
public Optional<String> getName() {
return Optional.ofNullable(tlsConfiguration.getName());
}
};
}
}
| KeycloakAdminRestClientProvider |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-proxyexchange-webmvc/src/main/java/org/springframework/cloud/gateway/mvc/config/ProxyResponseAutoConfiguration.java | {
"start": 2592,
"end": 4015
} | class ____ implements WebMvcConfigurer {
@Autowired
private ApplicationContext context;
@Bean
@ConditionalOnMissingBean
public ProxyExchangeArgumentResolver proxyExchangeArgumentResolver(Optional<RestTemplateBuilder> optional,
ProxyExchangeWebMvcProperties properties) {
RestTemplateBuilder builder = optional.orElse(new RestTemplateBuilder());
RestTemplate template = builder.build();
template.setErrorHandler(new NoOpResponseErrorHandler());
template.getMessageConverters().add(new ByteArrayHttpMessageConverter() {
@Override
public boolean supports(Class<?> clazz) {
return true;
}
});
ProxyExchangeArgumentResolver resolver = new ProxyExchangeArgumentResolver(template);
resolver.setHeaders(properties.convertHeaders());
resolver.setAutoForwardedHeaders(properties.getAutoForward());
Set<String> excludedHeaderNames = new HashSet<>();
if (!CollectionUtils.isEmpty(properties.getSensitive())) {
excludedHeaderNames.addAll(properties.getSensitive());
}
if (!CollectionUtils.isEmpty(properties.getSkipped())) {
excludedHeaderNames.addAll(properties.getSkipped());
}
resolver.setExcluded(excludedHeaderNames);
return resolver;
}
@Override
public void addArgumentResolvers(List<HandlerMethodArgumentResolver> argumentResolvers) {
argumentResolvers.add(context.getBean(ProxyExchangeArgumentResolver.class));
}
private static final | ProxyResponseAutoConfiguration |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/spi/JsonFactory.java | {
"start": 694,
"end": 1677
} | interface ____ {
/**
* <p> Load the JSON factory with the {@code ServiceLoader}
*
* <ul>
* <li>An attempt is made to load a factory using the service loader {@code META-INF/services} {@link JsonFactory}.</li>
* <li>Factories are sorted </li>
* <li>If not factory is resolved (which is usually the default case), {@link JacksonFactory#INSTANCE} is used.</li>
* </ul>
*
* <p> When the default Jackson codec is used and {@code jackson-databind} is available then a codec using it
* will be used otherwise the codec will only use {@code jackson-core} and provide best effort mapping.
*/
static JsonFactory load() {
return Utils.load();
}
/**
* The order of the factory. If there is more than one matching factory they will be tried in ascending order.
*
* @implSpec returns {@link Integer#MAX_VALUE}
*
* @return the order
*/
default int order() {
return Integer.MAX_VALUE;
}
JsonCodec codec();
}
| JsonFactory |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/reflect/AvroAliases.java | {
"start": 1087,
"end": 1136
} | interface ____ {
AvroAlias[] value();
}
| AvroAliases |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/AllNestedConditions.java | {
"start": 852,
"end": 971
} | class ____ match. Can be used
* to create composite conditions, for example:
*
* <pre class="code">
* static | conditions |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/Filter.java | {
"start": 4453,
"end": 5441
} | class ____ {
// filter instance name
final String name;
final FilterConfig filterConfig;
NamedFilterConfig(String name, FilterConfig filterConfig) {
this.name = name;
this.filterConfig = filterConfig;
}
String filterStateKey() {
return name + "_" + filterConfig.typeUrl();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
NamedFilterConfig that = (NamedFilterConfig) o;
return Objects.equals(name, that.name)
&& Objects.equals(filterConfig, that.filterConfig);
}
@Override
public int hashCode() {
return Objects.hash(name, filterConfig);
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("name", name)
.add("filterConfig", filterConfig)
.toString();
}
}
}
| NamedFilterConfig |
java | quarkusio__quarkus | test-framework/arquillian/src/test/java/io/quarkus/arquillian/test/MethodParameterInjectionTest.java | {
"start": 1157,
"end": 2297
} | class ____ {
@Deployment
public static JavaArchive createTestArchive() {
return ShrinkWrap.create(JavaArchive.class).addClasses(AppScopedBean1.class, AppScopedBean2.class);
}
@Test
public void injectOneApplicationScopedBean(AppScopedBean1 param) {
assertNotNull("Method param was not injected", param);
assertNotNull("@Inject did not work", injected);
assertEquals(injected, param);
}
@Test
public void injectTwoApplicationScopedBeans(AppScopedBean1 param1, AppScopedBean2 param2) {
assertNotNull("Method param was not injected", param1);
assertNotNull("Method param was not injected", param2);
}
@Test
public void injectFromProducer(OtherBean param1, OtherBean param2) {
assertNotNull(param1);
assertNotNull(param2);
assertNotSame(param1, param2);
}
@Test
public void injectWithQualifier(@Good X param) {
assertNotNull(param);
assertTrue(Y.class.isAssignableFrom(param.getClass()));
assertFalse(Z.class.isAssignableFrom(param.getClass()));
}
public | MethodParameterInjectionTest |
java | apache__camel | components/camel-univocity-parsers/src/test/java/org/apache/camel/dataformat/univocity/UniVocityFixedDataFormatTest.java | {
"start": 1206,
"end": 1284
} | class ____ the options of {@link UniVocityFixedDataFormat}.
*/
public final | tests |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldHaveRootCauseInstance.java | {
"start": 1009,
"end": 2447
} | class ____ extends BasicErrorMessageFactory {
/**
* Creates a new <code>{@link BasicErrorMessageFactory}</code>.
*
* @param actual the actual value in the failed assertion.
* @param expectedCauseType the expected cause type.
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldHaveRootCauseInstance(Throwable actual,
Class<? extends Throwable> expectedCauseType) {
return Throwables.getRootCause(actual) == null
? new ShouldHaveRootCauseInstance(expectedCauseType, actual)
: new ShouldHaveRootCauseInstance(actual, expectedCauseType);
}
private ShouldHaveRootCauseInstance(Throwable actual, Class<? extends Throwable> expectedCauseType) {
super("%nExpecting a throwable with root cause being an instance of:%n %s%nbut was an instance of:%n %s%n" +
"%nThrowable that failed the check:%n" + escapePercent(getStackTrace(actual)),
expectedCauseType, Throwables.getRootCause(actual).getClass());
}
private ShouldHaveRootCauseInstance(Class<? extends Throwable> expectedCauseType, Throwable actual) {
super("%nExpecting a throwable with root cause being an instance of:%n %s%nbut current throwable has no cause." +
"%nThrowable that failed the check:%n" + escapePercent(getStackTrace(actual)), expectedCauseType);
}
}
| ShouldHaveRootCauseInstance |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/ExecutionGraphToInputsLocationsRetrieverAdapterTest.java | {
"start": 2567,
"end": 9919
} | class ____ {
@RegisterExtension
private static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_EXTENSION =
TestingUtils.defaultExecutorExtension();
@Test
void testGetConsumedPartitionGroupsAndProducers() throws Exception {
final JobVertex producer1 = ExecutionGraphTestUtils.createNoOpVertex(1);
final JobVertex producer2 = ExecutionGraphTestUtils.createNoOpVertex(1);
final JobVertex consumer = ExecutionGraphTestUtils.createNoOpVertex(1);
final IntermediateDataSet dataSet1 =
connectNewDataSetAsInput(
consumer,
producer1,
DistributionPattern.ALL_TO_ALL,
ResultPartitionType.PIPELINED)
.getSource();
final IntermediateDataSet dataSet2 =
connectNewDataSetAsInput(
consumer,
producer2,
DistributionPattern.ALL_TO_ALL,
ResultPartitionType.PIPELINED)
.getSource();
final ExecutionGraph eg =
ExecutionGraphTestUtils.createExecutionGraph(
EXECUTOR_EXTENSION.getExecutor(), producer1, producer2, consumer);
final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);
ExecutionVertexID evIdOfProducer1 = new ExecutionVertexID(producer1.getID(), 0);
ExecutionVertexID evIdOfProducer2 = new ExecutionVertexID(producer2.getID(), 0);
ExecutionVertexID evIdOfConsumer = new ExecutionVertexID(consumer.getID(), 0);
Collection<ConsumedPartitionGroup> consumedPartitionGroupsOfProducer1 =
inputsLocationsRetriever.getConsumedPartitionGroups(evIdOfProducer1);
Collection<ConsumedPartitionGroup> consumedPartitionGroupsOfProducer2 =
inputsLocationsRetriever.getConsumedPartitionGroups(evIdOfProducer2);
Collection<ConsumedPartitionGroup> consumedPartitionGroupsOfConsumer =
inputsLocationsRetriever.getConsumedPartitionGroups(evIdOfConsumer);
IntermediateResultPartitionID partitionId1 =
new IntermediateResultPartitionID(dataSet1.getId(), 0);
IntermediateResultPartitionID partitionId2 =
new IntermediateResultPartitionID(dataSet2.getId(), 0);
assertThat(consumedPartitionGroupsOfProducer1).isEmpty();
assertThat(consumedPartitionGroupsOfProducer2).isEmpty();
assertThat(consumedPartitionGroupsOfConsumer).hasSize(2);
assertThat(
consumedPartitionGroupsOfConsumer.stream()
.flatMap(IterableUtils::toStream)
.collect(Collectors.toSet()))
.containsExactlyInAnyOrder(partitionId1, partitionId2);
for (ConsumedPartitionGroup consumedPartitionGroup : consumedPartitionGroupsOfConsumer) {
if (consumedPartitionGroup.getFirst().equals(partitionId1)) {
assertThat(
inputsLocationsRetriever.getProducersOfConsumedPartitionGroup(
consumedPartitionGroup))
.containsExactly(evIdOfProducer1);
} else {
assertThat(
inputsLocationsRetriever.getProducersOfConsumedPartitionGroup(
consumedPartitionGroup))
.containsExactly(evIdOfProducer2);
}
}
}
/** Tests that it will get empty task manager location if vertex is not scheduled. */
@Test
void testGetEmptyTaskManagerLocationIfVertexNotScheduled() throws Exception {
final JobVertex jobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
final ExecutionGraph eg =
ExecutionGraphTestUtils.createExecutionGraph(
EXECUTOR_EXTENSION.getExecutor(), jobVertex);
final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);
ExecutionVertexID executionVertexId = new ExecutionVertexID(jobVertex.getID(), 0);
Optional<CompletableFuture<TaskManagerLocation>> taskManagerLocation =
inputsLocationsRetriever.getTaskManagerLocation(executionVertexId);
assertThat(taskManagerLocation).isNotPresent();
}
/** Tests that it can get the task manager location in an Execution. */
@Test
void testGetTaskManagerLocationWhenScheduled() throws Exception {
final JobVertex jobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
final TestingLogicalSlot testingLogicalSlot =
new TestingLogicalSlotBuilder().createTestingLogicalSlot();
final ExecutionGraph eg =
ExecutionGraphTestUtils.createExecutionGraph(
EXECUTOR_EXTENSION.getExecutor(), jobVertex);
final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);
final ExecutionVertex onlyExecutionVertex = eg.getAllExecutionVertices().iterator().next();
onlyExecutionVertex.getCurrentExecutionAttempt().transitionState(ExecutionState.SCHEDULED);
onlyExecutionVertex.deployToSlot(testingLogicalSlot);
ExecutionVertexID executionVertexId = new ExecutionVertexID(jobVertex.getID(), 0);
Optional<CompletableFuture<TaskManagerLocation>> taskManagerLocationOptional =
inputsLocationsRetriever.getTaskManagerLocation(executionVertexId);
assertThat(taskManagerLocationOptional).isPresent();
final CompletableFuture<TaskManagerLocation> taskManagerLocationFuture =
taskManagerLocationOptional.get();
assertThat(taskManagerLocationFuture.get())
.isEqualTo(testingLogicalSlot.getTaskManagerLocation());
}
/**
* Tests that it will throw exception when getting the task manager location of a non existing
* execution.
*/
@Test
void testGetNonExistingExecutionVertexWillThrowException() throws Exception {
final JobVertex jobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
final ExecutionGraph eg =
ExecutionGraphTestUtils.createExecutionGraph(
EXECUTOR_EXTENSION.getExecutor(), jobVertex);
final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);
ExecutionVertexID invalidExecutionVertexId = new ExecutionVertexID(new JobVertexID(), 0);
assertThatThrownBy(
() ->
inputsLocationsRetriever.getTaskManagerLocation(
invalidExecutionVertexId),
"Should throw exception if execution vertex doesn't exist!")
.isInstanceOf(IllegalStateException.class);
}
}
| ExecutionGraphToInputsLocationsRetrieverAdapterTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java | {
"start": 4910,
"end": 32839
} | class ____ {
public int num_files = 0;
public int num_locs = 0;
}
// block id to BlockLocs
final Map<String, BlockLocs> block_map = new HashMap<String, BlockLocs> ();
@BeforeEach
public void setUp() throws Exception {
// bring up a cluster of 2
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, block_size);
// Allow a single volume failure (there are two volumes)
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
0, TimeUnit.MILLISECONDS);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dn_num).build();
cluster.waitActive();
fs = cluster.getFileSystem();
dataDir = new File(cluster.getDataDirectory());
}
@AfterEach
public void tearDown() throws Exception {
if(data_fail != null) {
FileUtil.setWritable(data_fail, true);
data_fail = null;
}
if(failedDir != null) {
FileUtil.setWritable(failedDir, true);
failedDir = null;
}
if(cluster != null) {
cluster.shutdown();
cluster = null;
}
}
/*
* Verify the number of blocks and files are correct after volume failure,
* and that we can replicate to both datanodes even after a single volume
* failure if the configuration parameter allows this.
*/
@Test
@Timeout(value = 120)
public void testVolumeFailure() throws Exception {
System.out.println("Data dir: is " + dataDir.getPath());
// Data dir structure is dataDir/data[1-4]/[current,tmp...]
// data1,2 is for datanode 1, data2,3 - datanode2
String filename = "/test.txt";
Path filePath = new Path(filename);
// we use only small number of blocks to avoid creating subdirs in the data dir..
int filesize = block_size*blocks_num;
DFSTestUtil.createFile(fs, filePath, filesize, repl, 1L);
DFSTestUtil.waitReplication(fs, filePath, repl);
System.out.println("file " + filename + "(size " +
filesize + ") is created and replicated");
// fail the volume
// delete/make non-writable one of the directories (failed volume)
data_fail = cluster.getInstanceStorageDir(1, 0);
failedDir = MiniDFSCluster.getFinalizedDir(data_fail,
cluster.getNamesystem().getBlockPoolId());
if (failedDir.exists() &&
//!FileUtil.fullyDelete(failedDir)
!deteteBlocks(failedDir)
) {
throw new IOException("Could not delete hdfs directory '" + failedDir + "'");
}
data_fail.setReadOnly();
failedDir.setReadOnly();
System.out.println("Deleteing " + failedDir.getPath() + "; exist=" + failedDir.exists());
// access all the blocks on the "failed" DataNode,
// we need to make sure that the "failed" volume is being accessed -
// and that will cause failure, blocks removal, "emergency" block report
triggerFailure(filename, filesize);
// DN eventually have latest volume failure information for next heartbeat
final DataNode dn = cluster.getDataNodes().get(1);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
final VolumeFailureSummary summary =
dn.getFSDataset().getVolumeFailureSummary();
return summary != null &&
summary.getFailedStorageLocations() != null &&
summary.getFailedStorageLocations().length == 1;
}
}, 10, 30 * 1000);
// trigger DN to send heartbeat
DataNodeTestUtils.triggerHeartbeat(dn);
final BlockManager bm = cluster.getNamesystem().getBlockManager();
// trigger NN handel heartbeat
BlockManagerTestUtil.checkHeartbeat(bm);
// NN now should have latest volume failure
assertEquals(1, cluster.getNamesystem().getVolumeFailuresTotal());
// assert failedStorageLocations
assertTrue(dn.getFSDataset().getVolumeFailureSummary()
.getFailedStorageLocations()[0]
.contains("[DISK]"));
// verify number of blocks and files...
verify(filename, filesize);
// create another file (with one volume failed).
System.out.println("creating file test1.txt");
Path fileName1 = new Path("/test1.txt");
DFSTestUtil.createFile(fs, fileName1, filesize, repl, 1L);
// should be able to replicate to both nodes (2 DN, repl=2)
DFSTestUtil.waitReplication(fs, fileName1, repl);
System.out.println("file " + fileName1.getName() +
" is created and replicated");
}
  /*
   * If one of the sub-folders under the finalized directory is unreadable,
   * either due to permissions or a filesystem corruption, the DN will fail
   * to read it when scanning it for blocks to load into the replica map. This
   * test ensures the DN does not exit and reports the failed volume to the
   * NN (HDFS-14333). This is done by using a simulated FsDataset that throws
   * an exception for a failed volume when the block pool is initialized.
   */
  @Test
  @Timeout(value = 15)
  public void testDnStartsAfterDiskErrorScanningBlockPool() throws Exception {
    // Don't use the cluster configured in the setup() method for this test.
    cluster.shutdown(true);
    cluster.close();

    // BadDiskFSDataset simulates one bad volume while the block pool is
    // being initialized.
    conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
        BadDiskFSDataset.Factory.class.getName());
    final MiniDFSCluster localCluster = new MiniDFSCluster
        .Builder(conf).numDataNodes(1).build();

    try {
      localCluster.waitActive();
      DataNode dn = localCluster.getDataNodes().get(0);

      try {
        localCluster.waitDatanodeFullyStarted(dn, 3000);
      } catch (TimeoutException e) {
        fail("Datanode did not get fully started");
      }
      // The DN must survive the bad volume rather than exit.
      assertTrue(dn.isDatanodeUp());

      // trigger DN to send heartbeat
      DataNodeTestUtils.triggerHeartbeat(dn);
      final BlockManager bm = localCluster.getNamesystem().getBlockManager();
      // trigger NN handle heartbeat
      BlockManagerTestUtil.checkHeartbeat(bm);

      // NN now should have the failed volume
      assertEquals(1, localCluster.getNamesystem().getVolumeFailuresTotal());
    } finally {
      localCluster.close();
    }
  }
  /**
   * Test that DataStorage and BlockPoolSliceStorage remove the failed volume
   * after failure.
   */
  @Test
  @Timeout(value = 150)
  public void testFailedVolumeBeingRemovedFromDataNode()
      throws Exception {
    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
    // volume failures which is currently not supported on Windows.
    assumeNotWindows();

    // Write a replicated file so the soon-to-fail volume holds real blocks.
    Path file1 = new Path("/test1");
    DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
    DFSTestUtil.waitReplication(fs, file1, (short) 2);

    File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
    DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
    DataNode dn0 = cluster.getDataNodes().get(0);
    // Block until the DataNode's disk checker has processed the failure.
    DataNodeTestUtils.waitForDiskError(dn0,
        DataNodeTestUtils.getVolume(dn0, dn0Vol1));

    // Verify dn0Vol1 has been completely removed from DN0.
    // 1. dn0Vol1 is removed from DataStorage.
    DataStorage storage = dn0.getStorage();
    assertEquals(1, storage.getNumStorageDirs());
    for (int i = 0; i < storage.getNumStorageDirs(); i++) {
      Storage.StorageDirectory sd = storage.getStorageDir(i);
      assertFalse(sd.getRoot().getAbsolutePath().startsWith(
          dn0Vol1.getAbsolutePath()
      ));
    }
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    BlockPoolSliceStorage bpsStorage = storage.getBPStorage(bpid);
    assertEquals(1, bpsStorage.getNumStorageDirs());
    for (int i = 0; i < bpsStorage.getNumStorageDirs(); i++) {
      Storage.StorageDirectory sd = bpsStorage.getStorageDir(i);
      assertFalse(sd.getRoot().getAbsolutePath().startsWith(
          dn0Vol1.getAbsolutePath()
      ));
    }

    // 2. dn0Vol1 is removed from FsDataset
    FsDatasetSpi<? extends FsVolumeSpi> data = dn0.getFSDataset();
    try (FsDatasetSpi.FsVolumeReferences vols = data.getFsVolumeReferences()) {
      for (FsVolumeSpi volume : vols) {
        assertFalse(new File(volume.getStorageLocation().getUri())
            .getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()
        ));
      }
    }

    // 3. all blocks on dn0Vol1 have been removed.
    for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(data, bpid)) {
      assertNotNull(replica.getVolume());
      assertFalse(new File(replica.getVolume().getStorageLocation().getUri())
          .getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()
      ));
    }

    // 4. dn0Vol1 is not in DN0's configuration and dataDirs anymore.
    String[] dataDirStrs =
        dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
    assertEquals(1, dataDirStrs.length);
    assertFalse(dataDirStrs[0].contains(dn0Vol1.getAbsolutePath()));
  }
  /**
   * Test DataNode stops when the number of failed volumes exceeds
   * dfs.datanode.failed.volumes.tolerated .
   */
  @Test
  @Timeout(value = 10)
  public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated()
      throws Exception {
    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
    // volume failures which is currently not supported on Windows.
    assumeNotWindows();

    // make both data directories to fail on dn0
    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
    DataNodeTestUtils.injectDataDirFailure(dn0Vol1, dn0Vol2);
    DataNode dn0 = cluster.getDataNodes().get(0);
    // Wait for the disk checker to register each failure before asserting.
    DataNodeTestUtils.waitForDiskError(dn0,
        DataNodeTestUtils.getVolume(dn0, dn0Vol1));
    DataNodeTestUtils.waitForDiskError(dn0,
        DataNodeTestUtils.getVolume(dn0, dn0Vol2));

    // DN0 should stop after the number of failure disks exceed tolerated
    // value (1).
    dn0.checkDiskError();
    assertFalse(dn0.shouldRun());
  }
  /**
   * Test that DN does not shutdown, as long as failure volumes being hot swapped.
   */
  @Test
  public void testVolumeFailureRecoveredByHotSwappingVolume()
      throws Exception {
    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
    // volume failures which is currently not supported on Windows.
    assumeNotWindows();

    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
    final DataNode dn0 = cluster.getDataNodes().get(0);
    final String oldDataDirs = dn0.getConf().get(
        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);

    // Fail dn0Vol1 first.
    DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
    DataNodeTestUtils.waitForDiskError(dn0,
        DataNodeTestUtils.getVolume(dn0, dn0Vol1));

    // Hot swap out the failure volume.
    // reconfigurePropertyImpl returns the effective value, which must match
    // what is now stored in the DN configuration.
    String dataDirs = dn0Vol2.getPath();
    assertThat(
        dn0.reconfigurePropertyImpl(
            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirs))
        .isEqualTo(dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));

    // Fix failure volume dn0Vol1 and remount it back.
    DataNodeTestUtils.restoreDataDirFromFailure(dn0Vol1);
    assertThat(
        dn0.reconfigurePropertyImpl(
            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, oldDataDirs))
        .isEqualTo(dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));

    // Fail dn0Vol2. Now since dn0Vol1 has been fixed, DN0 has sufficient
    // resources, thus it should keep running.
    DataNodeTestUtils.injectDataDirFailure(dn0Vol2);
    DataNodeTestUtils.waitForDiskError(dn0,
        DataNodeTestUtils.getVolume(dn0, dn0Vol2));
    assertTrue(dn0.shouldRun());
  }
  /**
   * Test {@link DataNode#refreshVolumes(String)} not deadLock with
   * {@link BPOfferService#registrationSucceeded(BPServiceActor,
   * DatanodeRegistration)}.
   */
  @Test
  @Timeout(value = 10)
  public void testRefreshDeadLock() throws Exception {
    // The fault injector parks the registration path (while it holds the
    // offer-service lock) until the latch is released, so the volume
    // refresh below runs concurrently with registrationSucceeded(). The
    // 10s @Timeout fails the test if the two paths deadlock.
    CountDownLatch latch = new CountDownLatch(1);
    DataNodeFaultInjector.set(new DataNodeFaultInjector() {
      public void delayWhenOfferServiceHoldLock() {
        try {
          latch.await();
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          e.printStackTrace();
        }
      }
    });

    DataNode dn = cluster.getDataNodes().get(0);
    File volume = cluster.getInstanceStorageDir(0, 0);
    String dataDirs = volume.getPath();
    List<BPOfferService> allBpOs = dn.getAllBpOs();
    BPOfferService service = allBpOs.get(0);
    BPServiceActor actor = service.getBPServiceActors().get(0);
    DatanodeRegistration bpRegistration = actor.getBpRegistration();

    // Drive registrationSucceeded() from a separate thread so it can block
    // inside the injected delay while the main thread reconfigures.
    Thread register = new SubjectInheritingThread(() -> {
      try {
        service.registrationSucceeded(actor, bpRegistration);
      } catch (IOException e) {
        e.printStackTrace();
      }
    });

    register.start();
    String newdir = dataDirs + "tmp";
    // Make sure service have get writelock
    latch.countDown();
    // A non-null result means reconfiguration completed, i.e. no deadlock.
    String result = dn.reconfigurePropertyImpl(
        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newdir);
    assertNotNull(result);
  }
  /**
   * Test changing the number of volumes does not impact the disk failure
   * tolerance.
   */
  @Test
  public void testTolerateVolumeFailuresAfterAddingMoreVolumes()
      throws Exception {
    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
    // volume failures which is currently not supported on Windows.
    assumeNotWindows();

    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
    final File dn0VolNew = new File(dataDir, "data_new");
    final DataNode dn0 = cluster.getDataNodes().get(0);
    final String oldDataDirs = dn0.getConf().get(
        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);

    // Add a new volume to DN0
    assertThat(
        dn0.reconfigurePropertyImpl(
            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
            oldDataDirs + "," + dn0VolNew.getAbsolutePath()))
        .isEqualTo(dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));

    // Fail dn0Vol1 first and hot swap it.
    // One failure out of three volumes is within the tolerated count.
    DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
    DataNodeTestUtils.waitForDiskError(dn0,
        DataNodeTestUtils.getVolume(dn0, dn0Vol1));
    assertTrue(dn0.shouldRun());

    // Fail dn0Vol2, now dn0 should stop, because we only tolerate 1 disk failure.
    DataNodeTestUtils.injectDataDirFailure(dn0Vol2);
    DataNodeTestUtils.waitForDiskError(dn0,
        DataNodeTestUtils.getVolume(dn0, dn0Vol2));
    dn0.checkDiskError();
    assertFalse(dn0.shouldRun());
  }
/**
* Test that there are under replication blocks after vol failures
*/
@Test
public void testUnderReplicationAfterVolFailure() throws Exception {
// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
// volume failures which is currently not supported on Windows.
assumeNotWindows();
// Bring up one more datanode
cluster.startDataNodes(conf, 1, true, null, null);
cluster.waitActive();
final BlockManager bm = cluster.getNamesystem().getBlockManager();
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file1, (short)3);
// Fail the first volume on both datanodes
File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
Path file2 = new Path("/test2");
DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file2, (short)3);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
// underReplicatedBlocks are due to failed volumes
long underReplicatedBlocks = bm.getLowRedundancyBlocksCount()
+ bm.getPendingReconstructionBlocksCount();
if (underReplicatedBlocks > 0) {
return true;
}
LOG.info("There is no under replicated block after volume failure.");
return false;
}
}, 500, 60000);
}
  /**
   * Test if there is volume failure, the DataNode will fail to start.
   *
   * We fail a volume by setting the parent directory non-writable.
   */
  @Test
  @Timeout(value = 120)
  public void testDataNodeFailToStartWithVolumeFailure() throws Exception {
    // Method to simulate volume failures is currently not supported on Windows.
    assumeNotWindows();

    failedDir = new File(dataDir, "failedDir");
    assertTrue(failedDir.mkdir() && failedDir.setReadOnly(),
        "Failed to fail a volume by setting it non-writable");
    // tolerated = false: with zero tolerated failures startup must fail.
    startNewDataNodeWithDiskFailure(new File(failedDir, "newDir1"), false);
  }
  /**
   * DataNode will start and tolerate one failing disk according to config.
   *
   * We fail a volume by setting the parent directory non-writable.
   */
  @Test
  @Timeout(value = 120)
  public void testDNStartAndTolerateOneVolumeFailure() throws Exception {
    // Method to simulate volume failures is currently not supported on Windows.
    assumeNotWindows();

    failedDir = new File(dataDir, "failedDir");
    assertTrue(failedDir.mkdir() && failedDir.setReadOnly(),
        "Failed to fail a volume by setting it non-writable");
    // tolerated = true: one bad volume is allowed, so startup must succeed.
    startNewDataNodeWithDiskFailure(new File(failedDir, "newDir1"), true);
  }
  /**
   * Test if data directory is not readable/writable, DataNode won't start.
   */
  @Test
  @Timeout(value = 120)
  public void testDNFailToStartWithDataDirNonWritable() throws Exception {
    // Method to simulate volume failures is currently not supported on Windows.
    assumeNotWindows();

    final File readOnlyDir = new File(dataDir, "nonWritable");
    assertTrue(readOnlyDir.mkdir() && readOnlyDir.setReadOnly(),
        "Set the data dir permission non-writable");
    // tolerated = false: with zero tolerated failures startup must fail.
    startNewDataNodeWithDiskFailure(new File(readOnlyDir, "newDir1"), false);
  }
  /**
   * DataNode will start and tolerate one non-writable data directory
   * according to config.
   */
  @Test
  @Timeout(value = 120)
  public void testDNStartAndTolerateOneDataDirNonWritable() throws Exception {
    // Method to simulate volume failures is currently not supported on Windows.
    assumeNotWindows();

    final File readOnlyDir = new File(dataDir, "nonWritable");
    assertTrue(readOnlyDir.mkdir() && readOnlyDir.setReadOnly(),
        "Set the data dir permission non-writable");
    // tolerated = true: one bad volume is allowed, so startup must succeed.
    startNewDataNodeWithDiskFailure(new File(readOnlyDir, "newDir1"), true);
  }
  /**
   * Starts one extra DataNode configured with one bad data directory and one
   * good one, and asserts that startup succeeds or fails according to the
   * tolerated-failures setting.
   *
   * @param badDataDir bad data dir, either disk failure or non-writable
   * @param tolerated true if one volume failure is allowed else false
   */
  private void startNewDataNodeWithDiskFailure(File badDataDir,
      boolean tolerated) throws Exception {
    final File data5 = new File(dataDir, "data5");
    final String newDirs = badDataDir.toString() + "," + data5.toString();
    final Configuration newConf = new Configuration(conf);
    newConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
    LOG.info("Setting dfs.datanode.data.dir for new DataNode as {}", newDirs);
    newConf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
        tolerated ? 1 : 0);

    // bring up one more DataNode
    assertEquals(repl, cluster.getDataNodes().size());
    try {
      cluster.startDataNodes(newConf, 1, false, null, null);
      // Startup succeeded; that is only correct when the failure is tolerated.
      assertTrue(tolerated, "Failed to get expected IOException");
    } catch (IOException ioe) {
      // Startup failed; that is only correct when no failure is tolerated.
      assertFalse(tolerated, "Unexpected IOException " + ioe);
      return;
    }
    assertEquals(repl + 1, cluster.getDataNodes().size());

    // create new file and it should be able to replicate to 3 nodes
    final Path p = new Path("/test1.txt");
    DFSTestUtil.createFile(fs, p, block_size * blocks_num, (short) 3, 1L);
    DFSTestUtil.waitReplication(fs, p, (short) (repl + 1));
  }
  /**
   * verifies two things:
   * 1. number of locations of each block in the name node
   * matches number of actual files
   * 2. block files + pending block equals to total number of blocks that a file has
   * including the replication (HDFS file has 30 blocks, repl=2 - total 60
   * @param fn - file name
   * @param fs - file size
   * @throws IOException if the NameNode block lookup fails
   */
  private void verify(String fn, int fs) throws IOException {
    // now count how many physical blocks are there
    int totalReal = countRealBlocks(block_map);
    System.out.println("countRealBlocks counted " + totalReal + " blocks");

    // count how many blocks store in NN structures.
    int totalNN = countNNBlocks(block_map, fn, fs);
    System.out.println("countNNBlocks counted " + totalNN + " blocks");

    // Per-block consistency: both counters above filled the same map.
    for (String bid : block_map.keySet()) {
      BlockLocs bl = block_map.get(bid);
      // System.out.println(bid + "->" + bl.num_files + "vs." + bl.num_locs);
      // number of physical files (1 or 2) should be same as number of datanodes
      // in the list of the block locations
      assertEquals(bl.num_files, bl.num_locs, "Num files should match num locations");
    }
    assertEquals(totalReal, totalNN, "Num physical blocks should match num stored in the NN");

    // now check the number of under-replicated blocks
    FSNamesystem fsn = cluster.getNamesystem();
    // force update of all the metric counts by calling computeDatanodeWork
    BlockManagerTestUtil.getComputedDatanodeWork(fsn.getBlockManager());
    // get all the counts
    long underRepl = fsn.getUnderReplicatedBlocks();
    long pendRepl = fsn.getPendingReplicationBlocks();
    long totalRepl = underRepl + pendRepl;
    System.out.println("underreplicated after = "+ underRepl +
        " and pending repl =" + pendRepl + "; total underRepl = " + totalRepl);

    System.out.println("total blocks (real and replicating):" +
        (totalReal + totalRepl) + " vs. all files blocks " + blocks_num*2);

    // together all the blocks should be equal to all real + all underreplicated
    assertEquals(totalReal + totalRepl, blocks_num * repl, "Incorrect total block count");
  }
  /**
   * go to each block on the 2nd DataNode until it fails...
   * @param path file whose blocks are probed
   * @param size file length in bytes (range of blocks to look up)
   * @throws IOException if the NameNode block lookup fails
   */
  private void triggerFailure(String path, long size) throws IOException {
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    List<LocatedBlock> locatedBlocks =
        nn.getBlockLocations(path, 0, size).getLocatedBlocks();

    for (LocatedBlock lb : locatedBlocks) {
      // NOTE(review): index 1 assumes every block reports at least two
      // locations (repl >= 2) — confirm callers only pass fully replicated
      // files, otherwise this throws ArrayIndexOutOfBoundsException.
      DatanodeInfo dinfo = lb.getLocations()[1];
      ExtendedBlock b = lb.getBlock();
      try {
        accessBlock(dinfo, lb);
      } catch (IOException e) {
        // First unreadable replica found: the failure has been triggered,
        // stop probing further blocks.
        System.out.println("Failure triggered, on block: " + b.getBlockId() +
            "; corresponding volume should be removed by now");
        break;
      }
    }
  }
/**
* simulate failure delete all the block files
* @param dir
* @throws IOException
*/
private boolean deteteBlocks(File dir) {
Collection<File> fileList = FileUtils.listFiles(dir,
TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE);
for(File f : fileList) {
if(f.getName().startsWith(Block.BLOCK_FILE_PREFIX)) {
System.out.println("Deleting file " + f);
if(!f.delete())
return false;
}
}
return true;
}
  /**
   * try to access a block on a data node. If fails - throws exception
   * @param datanode datanode expected to serve the replica
   * @param lblock located block (id + access token) to read
   * @throws IOException if the read handshake with the datanode fails
   */
  private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
      throws IOException {
    InetSocketAddress targetAddr = null;
    ExtendedBlock block = lblock.getBlock();

    targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

    // Build a block reader aimed directly at the given datanode; build()
    // performs the read handshake, so a lost/unreadable replica surfaces
    // here as an IOException.
    BlockReader blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
        setInetSocketAddress(targetAddr).
        setBlock(block).
        setFileName(BlockReaderFactory.getFileName(targetAddr,
            "test-blockpoolid", block.getBlockId())).
        setBlockToken(lblock.getBlockToken()).
        setStartOffset(0).
        setLength(0).
        setVerifyChecksum(true).
        setClientName("TestDataNodeVolumeFailure").
        setDatanodeInfo(datanode).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            // Plain (non-cached) socket connection to the datanode; close
            // the socket if peer creation fails partway.
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
              peer = DFSUtilClient.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
    // Only the handshake matters for this test; release the reader at once.
    blockReader.close();
  }
/**
* Count datanodes that have copies of the blocks for a file
* put it into the map
* @param map
* @param path
* @param size
* @return
* @throws IOException
*/
private int countNNBlocks(Map<String, BlockLocs> map, String path, long size)
throws IOException {
int total = 0;
NamenodeProtocols nn = cluster.getNameNodeRpc();
List<LocatedBlock> locatedBlocks =
nn.getBlockLocations(path, 0, size).getLocatedBlocks();
//System.out.println("Number of blocks: " + locatedBlocks.size());
for(LocatedBlock lb : locatedBlocks) {
String blockId = ""+lb.getBlock().getBlockId();
//System.out.print(blockId + ": ");
DatanodeInfo[] dn_locs = lb.getLocations();
BlockLocs bl = map.get(blockId);
if(bl == null) {
bl = new BlockLocs();
}
//System.out.print(dn_info.name+",");
total += dn_locs.length;
bl.num_locs += dn_locs.length;
map.put(blockId, bl);
//System.out.println();
}
return total;
}
  /**
   * look for real blocks
   * by counting *.meta files in all the storage dirs
   * @param map per-block-id accounting of physical files, updated in place
   * @return total number of block metadata files found across all datanodes
   */
  private int countRealBlocks(Map<String, BlockLocs> map) {
    int total = 0;
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    // Scan both configured storage dirs (j = 0, 1) of every datanode.
    for (int i = 0; i < dn_num; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        if (dir == null) {
          System.out.println("dir is null for dn=" + i + " and data_dir=" + j);
          continue;
        }

        List<File> res = MiniDFSCluster.getAllBlockMetadataFiles(dir);
        if (res == null) {
          System.out.println("res is null for dir = " + dir + " i=" + i + " and j=" + j);
          continue;
        }
        //System.out.println("for dn" + i + "." + j + ": " + dir + "=" + res.length+ " files");

        //int ii = 0;
        for (File f : res) {
          String s = f.getName();
          // cut off "blk_-" at the beginning and ".meta" at the end
          // (meta file names look like blk_<id>_<genstamp>.meta, so the
          // substring between the first and last '_' is the block id)
          assertNotNull(s, "Block file name should not be null");
          String bid = s.substring(s.indexOf("_")+1, s.lastIndexOf("_"));
          //System.out.println(ii++ + ". block " + s + "; id=" + bid);

          BlockLocs val = map.get(bid);
          if (val == null) {
            val = new BlockLocs();
          }
          val.num_files ++; // one more file for the block
          map.put(bid, val);
        }
        //System.out.println("dir1="+dir.getPath() + "blocks=" + res.length);
        //System.out.println("dir2="+dir2.getPath() + "blocks=" + res2.length);

        total += res.size();
      }
    }
    return total;
  }
private static | BlockLocs |
java | quarkusio__quarkus | integration-tests/resteasy-jackson/src/test/java/io/quarkus/it/resteasy/jackson/ApplicationPropertiesOverrideIT.java | {
"start": 543,
"end": 795
} | class ____ {
@Test
void testEndpoint() {
given()
.when().get("/message")
.then()
.statusCode(200)
.body(containsString("Production"));
}
}
| ApplicationPropertiesOverrideIT |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/injection/resource/ResourceInjectionTest.java | {
"start": 3919,
"end": 4122
} | class ____ {
@Produces
@PersistenceContext
EntityManager entityManager;
}
@Target({ ElementType.FIELD })
@Retention(RetentionPolicy.RUNTIME)
public @ | EEResourceField |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_wuzhengmao.java | {
"start": 895,
"end": 1254
} | class ____ {
int id;
Node parent;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public Node getParent() {
return parent;
}
public void setParent(Node parent) {
this.parent = parent;
}
}
}
| Node |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/descriptor/jdbc/TimestampUtcAsInstantJdbcType.java | {
"start": 933,
"end": 4999
} | class ____ implements JdbcType {
public static final TimestampUtcAsInstantJdbcType INSTANCE = new TimestampUtcAsInstantJdbcType();
private static final Calendar UTC_CALENDAR = Calendar.getInstance( TimeZone.getTimeZone( "UTC" ) );
public TimestampUtcAsInstantJdbcType() {
}
@Override
public int getJdbcTypeCode() {
return Types.TIMESTAMP_WITH_TIMEZONE;
}
@Override
public int getDefaultSqlTypeCode() {
return SqlTypes.TIMESTAMP_UTC;
}
@Override
public String getFriendlyName() {
return "TIMESTAMP_UTC";
}
@Override
public String toString() {
return "TimestampUtcDescriptor";
}
@Override
public <T> JavaType<T> getJdbcRecommendedJavaTypeMapping(
Integer length,
Integer scale,
TypeConfiguration typeConfiguration) {
return typeConfiguration.getJavaTypeRegistry().getDescriptor( Instant.class );
}
@Override
public Class<?> getPreferredJavaTypeClass(WrapperOptions options) {
return Instant.class;
}
@Override
public <T> JdbcLiteralFormatter<T> getJdbcLiteralFormatter(JavaType<T> javaType) {
return new JdbcLiteralFormatterTemporal<>( javaType, TemporalType.TIMESTAMP );
}
@Override
public <X> ValueBinder<X> getBinder(final JavaType<X> javaType) {
return new BasicBinder<>( javaType, this ) {
@Override
protected void doBind(
PreparedStatement st,
X value,
int index,
WrapperOptions wrapperOptions) throws SQLException {
final Instant instant = javaType.unwrap( value, Instant.class, wrapperOptions );
try {
// supported by some databases (not required by JDBC)
st.setObject( index, instant, Types.TIMESTAMP_WITH_TIMEZONE );
}
catch (SQLException|AbstractMethodError e) {
// fall back to treating it as a JDBC Timestamp
st.setTimestamp( index, Timestamp.from( instant ), UTC_CALENDAR );
}
}
@Override
protected void doBind(
CallableStatement st,
X value,
String name,
WrapperOptions wrapperOptions)
throws SQLException {
final Instant instant = javaType.unwrap( value, Instant.class, wrapperOptions );
try {
// supported by some databases (not required by JDBC)
st.setObject( name, instant, Types.TIMESTAMP_WITH_TIMEZONE );
}
catch (SQLException|AbstractMethodError e) {
// fall back to treating it as a JDBC Timestamp
st.setTimestamp( name, Timestamp.from( instant ), UTC_CALENDAR );
}
}
};
}
@Override
public <X> ValueExtractor<X> getExtractor(final JavaType<X> javaType) {
return new BasicExtractor<>( javaType, this ) {
@Override
protected X doExtract(ResultSet rs, int position, WrapperOptions wrapperOptions) throws SQLException {
try {
// supported by some databases (not required by JDBC)
return javaType.wrap( rs.getObject( position, Instant.class ), wrapperOptions );
}
catch (SQLException|AbstractMethodError e) {
// fall back to treating it as a JDBC Timestamp
return javaType.wrap( rs.getTimestamp( position, UTC_CALENDAR ), wrapperOptions );
}
}
@Override
protected X doExtract(CallableStatement statement, int position, WrapperOptions wrapperOptions) throws SQLException {
try {
// supported by some databases (not required by JDBC)
return javaType.wrap( statement.getObject( position, Instant.class ), wrapperOptions );
}
catch (SQLException|AbstractMethodError e) {
// fall back to treating it as a JDBC Timestamp
return javaType.wrap( statement.getTimestamp( position, UTC_CALENDAR ), wrapperOptions );
}
}
@Override
protected X doExtract(CallableStatement statement, String name, WrapperOptions wrapperOptions) throws SQLException {
try {
// supported by some databases (not required by JDBC)
return javaType.wrap( statement.getObject( name, Instant.class ), wrapperOptions );
}
catch (SQLException|AbstractMethodError e) {
// fall back to treating it as a JDBC Timestamp
return javaType.wrap( statement.getTimestamp( name, UTC_CALENDAR ), wrapperOptions );
}
}
};
}
}
| TimestampUtcAsInstantJdbcType |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/injectionpoint/Vehicle.java | {
"start": 826,
"end": 1033
} | class ____ {
private final Engine engine;
Vehicle(@Cylinders(6) Engine engine) {
this.engine = engine;
}
String start() {
return engine.start();
}
}
// end::class[]
| Vehicle |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java | {
"start": 2780,
"end": 25213
} | class ____ extends AbstractFieldDataImplTestCase {
    // Adds "name" both as a stored/indexed string field and as sorted-set
    // doc values, so the value is available to stored-field lookups and to
    // the field-data/sorting code under test.
    private void addField(Document d, String name, String value) {
        d.add(new StringField(name, value, Field.Store.YES));
        d.add(new SortedSetDocValuesField(name, new BytesRef(value)));
    }
    @Override
    protected void fillSingleValueAllSet() throws Exception {
        // Three docs, each with exactly one "value": 2, 1, 3 (ids 1, 1, 3).
        Document d = new Document();
        addField(d, "_id", "1");
        addField(d, "value", "2");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "1");
        addField(d, "value", "1");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "3");
        addField(d, "value", "3");
        writer.addDocument(d);
    }
    @Override
    protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
        // Two single-valued docs; the first is deleted after a commit so the
        // index contains a live deletion.
        Document d = new Document();
        addField(d, "_id", "1");
        addField(d, "value", "2");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "2");
        addField(d, "value", "4");
        writer.addDocument(d);

        writer.commit();

        writer.deleteDocuments(new Term("_id", "1"));
    }
    @Override
    protected void fillSingleValueWithMissing() throws Exception {
        // Three docs; doc "_id"=2 deliberately has no "value" field.
        Document d = new Document();
        addField(d, "_id", "1");
        addField(d, "value", "2");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "2");
        // d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "3");
        addField(d, "value", "3");
        writer.addDocument(d);
    }
    @Override
    protected void fillMultiValueAllSet() throws Exception {
        // Doc 1 is multi-valued (2, 4); docs 2 and 3 are single-valued.
        Document d = new Document();
        addField(d, "_id", "1");
        addField(d, "value", "2");
        addField(d, "value", "4");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "2");
        addField(d, "value", "1");
        writer.addDocument(d);
        // Commit here so the last doc lands in a separate segment.
        writer.commit(); // TODO: Have tests with more docs for sorting

        d = new Document();
        addField(d, "_id", "3");
        addField(d, "value", "3");
        writer.addDocument(d);
    }
    @Override
    protected void fillMultiValueWithMissing() throws Exception {
        // Doc 1 is multi-valued (2, 4); doc 2 has no "value" (missing);
        // doc 3 is single-valued.
        Document d = new Document();
        addField(d, "_id", "1");
        addField(d, "value", "2");
        addField(d, "value", "4");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "2");
        // d.add(new StringField("value", one(), Field.Store.NO)); // MISSING
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "3");
        addField(d, "value", "3");
        writer.addDocument(d);
    }
    @Override
    protected void fillAllMissing() throws Exception {
        // Three docs, none of which has a "value" field.
        Document d = new Document();
        addField(d, "_id", "1");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "2");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "3");
        writer.addDocument(d);
    }
    @Override
    protected void fillExtendedMvSet() throws Exception {
        // Eight docs across three segments (commits after docs 3 and 7).
        // Docs 2 and 6 have no "value" field; doc 8 uses '!'-prefixed values,
        // which sort before the digit-prefixed ones in byte order.
        Document d = new Document();
        addField(d, "_id", "1");
        addField(d, "value", "02");
        addField(d, "value", "04");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "2");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "3");
        addField(d, "value", "03");
        writer.addDocument(d);
        writer.commit();

        d = new Document();
        addField(d, "_id", "4");
        addField(d, "value", "04");
        addField(d, "value", "05");
        addField(d, "value", "06");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "5");
        addField(d, "value", "06");
        addField(d, "value", "07");
        addField(d, "value", "08");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "6");
        writer.addDocument(d);

        d = new Document();
        addField(d, "_id", "7");
        addField(d, "value", "08");
        addField(d, "value", "09");
        addField(d, "value", "10");
        writer.addDocument(d);
        writer.commit();

        d = new Document();
        addField(d, "_id", "8");
        addField(d, "value", "!08");
        addField(d, "value", "!09");
        addField(d, "value", "!10");
        writer.addDocument(d);
    }
    // Forward and reverse variants of the substituted-missing-value sort test.
    public void testActualMissingValue() throws IOException {
        testActualMissingValue(false);
    }

    public void testActualMissingValueReverse() throws IOException {
        testActualMissingValue(true);
    }
    /**
     * Indexes random docs (some without the field), sorts on the field with a
     * concrete substitute for missing values, and checks the resulting order.
     *
     * @param reverse whether to sort descending
     */
    public void testActualMissingValue(boolean reverse) throws IOException {
        // missing value is set to an actual value
        final String[] values = new String[randomIntBetween(2, 30)];
        // values[0] stays null: docs that draw it get no "value" field at all.
        for (int i = 1; i < values.length; ++i) {
            values[i] = TestUtil.randomUnicodeString(random());
        }
        final int numDocs = scaledRandomIntBetween(10, 3072);
        for (int i = 0; i < numDocs; ++i) {
            final String value = RandomPicks.randomFrom(random(), values);
            if (value == null) {
                writer.addDocument(new Document());
            } else {
                Document d = new Document();
                addField(d, "value", value);
                writer.addDocument(d);
            }
            if (randomInt(10) == 0) {
                // occasional commit -> multiple segments
                writer.commit();
            }
        }
        final IndexFieldData<?> indexFieldData = getForField("value");
        // Docs without the field sort as if they contained values[1].
        final String missingValue = values[1];
        IndexSearcher searcher = newIndexSearcher(DirectoryReader.open(writer));
        SortField sortField = indexFieldData.sortField(missingValue, MultiValueMode.MIN, null, reverse);
        TopFieldDocs topDocs = searcher.search(
            new MatchAllDocsQuery(),
            randomBoolean() ? numDocs : randomIntBetween(10, numDocs),
            new Sort(sortField)
        );
        assertEquals(numDocs, topDocs.totalHits.value());
        // Walk the hits and verify monotonic byte order, substituting
        // missingValue for docs that stored no "value".
        BytesRef previousValue = reverse ? UnicodeUtil.BIG_TERM : new BytesRef();
        for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
            final String docValue = searcher.storedFields().document(topDocs.scoreDocs[i].doc).get("value");
            final BytesRef value = new BytesRef(docValue == null ? missingValue : docValue);
            if (reverse) {
                assertTrue(previousValue.compareTo(value) >= 0);
            } else {
                assertTrue(previousValue.compareTo(value) <= 0);
            }
            previousValue = value;
        }
        searcher.getIndexReader().close();
    }
    // The four missing-first/missing-last x forward/reverse combinations of
    // the sort-missing test.
    public void testSortMissingFirst() throws IOException {
        testSortMissing(true, false);
    }

    public void testSortMissingFirstReverse() throws IOException {
        testSortMissing(true, true);
    }

    public void testSortMissingLast() throws IOException {
        testSortMissing(false, false);
    }

    public void testSortMissingLastReverse() throws IOException {
        testSortMissing(false, true);
    }
    /**
     * Sorts using the {@code _first}/{@code _last} missing-value sentinels and verifies
     * that documents without a value are grouped at the expected end of the results,
     * with the valued documents in sorted order relative to each other.
     *
     * @param first   whether missing documents should sort first ({@code _first}) or last ({@code _last})
     * @param reverse whether to sort descending
     */
    public void testSortMissing(boolean first, boolean reverse) throws IOException {
        // values[0] is left null so that some documents end up without the field
        final String[] values = new String[randomIntBetween(2, 10)];
        for (int i = 1; i < values.length; ++i) {
            values[i] = TestUtil.randomUnicodeString(random());
        }
        final int numDocs = scaledRandomIntBetween(10, 3072);
        for (int i = 0; i < numDocs; ++i) {
            final String value = RandomPicks.randomFrom(random(), values);
            if (value == null) {
                writer.addDocument(new Document());
            } else {
                Document d = new Document();
                addField(d, "value", value);
                writer.addDocument(d);
            }
            if (randomInt(10) == 0) {
                // occasionally commit so the index ends up with multiple segments
                writer.commit();
            }
        }
        final IndexFieldData<?> indexFieldData = getForField("value");
        IndexSearcher searcher = newIndexSearcher(DirectoryReader.open(writer));
        SortField sortField = indexFieldData.sortField(first ? "_first" : "_last", MultiValueMode.MIN, null, reverse);
        TopFieldDocs topDocs = searcher.search(
            new MatchAllDocsQuery(),
            randomBoolean() ? numDocs : randomIntBetween(10, numDocs),
            new Sort(sortField)
        );
        assertThat(topDocs.totalHits.value(), lessThanOrEqualTo((long) numDocs));
        // When missing sorts first the sentinel is null; otherwise start from the extreme value.
        BytesRef previousValue = first ? null : reverse ? UnicodeUtil.BIG_TERM : new BytesRef();
        for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
            final String docValue = searcher.storedFields().document(topDocs.scoreDocs[i].doc).get("value");
            if (first && docValue == null) {
                // missing-first: a missing doc may only appear while we have not seen a value yet
                assertNull(previousValue);
            } else if (first == false && docValue != null) {
                // missing-last: once a missing doc has been seen, no valued doc may follow
                assertNotNull(previousValue);
            }
            final BytesRef value = docValue == null ? null : new BytesRef(docValue);
            if (previousValue != null && value != null) {
                if (reverse) {
                    assertTrue(previousValue.compareTo(value) >= 0);
                } else {
                    assertTrue(previousValue.compareTo(value) <= 0);
                }
            }
            previousValue = value;
        }
        searcher.getIndexReader().close();
    }
    /** Nested sorting picking the minimum child value per parent. */
    public void testNestedSortingMin() throws IOException {
        testNestedSorting(MultiValueMode.MIN);
    }
    /** Nested sorting picking the maximum child value per parent. */
    public void testNestedSortingMax() throws IOException {
        testNestedSorting(MultiValueMode.MAX);
    }
    /**
     * Indexes random parent/child document blocks, sorts parents by their children's
     * {@code text} values through a {@link ToParentBlockJoinQuery}, and verifies the
     * resulting parent order against a value recomputed from the stored fields of each
     * parent's children (reduced with {@code sortMode}, falling back to the configured
     * missing value when a parent has no valued children).
     *
     * @param sortMode how multiple child values are reduced to one sort key (MIN or MAX)
     */
    public void testNestedSorting(MultiValueMode sortMode) throws IOException {
        final String[] values = new String[randomIntBetween(2, 20)];
        for (int i = 0; i < values.length; ++i) {
            values[i] = TestUtil.randomSimpleString(random());
        }
        final int numParents = scaledRandomIntBetween(10, 3072);
        List<Document> docs = new ArrayList<>();
        // Tracks which docIDs are parents; the parent of a block is the last doc in it.
        FixedBitSet parents = new FixedBitSet(64);
        for (int i = 0; i < numParents; ++i) {
            docs.clear();
            final int numChildren = randomInt(4);
            for (int j = 0; j < numChildren; ++j) {
                final Document child = new Document();
                final int numValues = randomInt(3);
                for (int k = 0; k < numValues; ++k) {
                    final String value = RandomPicks.randomFrom(random(), values);
                    addField(child, "text", value);
                }
                docs.add(child);
            }
            final Document parent = new Document();
            parent.add(new StringField("type", "parent", Store.YES));
            final String value = RandomPicks.randomFrom(random(), values);
            if (value != null) {
                addField(parent, "text", value);
            }
            docs.add(parent);
            // This parent's docID = previous parent's docID + size of this block.
            int bit = parents.prevSetBit(parents.length() - 1) + docs.size();
            parents = FixedBitSet.ensureCapacity(parents, bit);
            parents.set(bit);
            writer.addDocuments(docs);
            if (randomInt(10) == 0) {
                // occasionally commit so the index ends up with multiple segments
                writer.commit();
            }
        }
        DirectoryReader directoryReader = DirectoryReader.open(writer);
        directoryReader = ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexService.index(), 0));
        IndexSearcher searcher = newIndexSearcher(directoryReader);
        IndexFieldData<?> fieldData = getForField("text");
        // Randomly exercise the sentinel missing values, an existing value, and an arbitrary one.
        final Object missingValue = switch (randomInt(4)) {
            case 0 -> "_first";
            case 1 -> "_last";
            case 2 -> new BytesRef(RandomPicks.randomFrom(random(), values));
            default -> new BytesRef(TestUtil.randomSimpleString(random()));
        };
        Query parentFilter = new TermQuery(new Term("type", "parent"));
        Query childFilter = Queries.not(parentFilter);
        Nested nested = createNested(searcher, parentFilter, childFilter);
        BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource(fieldData, missingValue, sortMode, nested);
        ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(
            new ConstantScoreQuery(childFilter),
            new QueryBitSetProducer(parentFilter),
            ScoreMode.None
        );
        Sort sort = new Sort(new SortField("text", nestedComparatorSource));
        TopFieldDocs topDocs = searcher.search(query, randomIntBetween(1, numParents), sort);
        assertTrue(topDocs.scoreDocs.length > 0);
        BytesRef previous = null;
        for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
            final int docID = topDocs.scoreDocs[i].doc;
            assertTrue("expected " + docID + " to be a parent", parents.get(docID));
            // Recompute the expected sort key for this parent from its children's stored values.
            BytesRef cmpValue = null;
            for (int child = parents.prevSetBit(docID - 1) + 1; child < docID; ++child) {
                String[] sVals = searcher.storedFields().document(child).getValues("text");
                final BytesRef[] vals;
                if (sVals.length == 0) {
                    vals = new BytesRef[0];
                } else {
                    vals = new BytesRef[sVals.length];
                    for (int j = 0; j < vals.length; ++j) {
                        vals[j] = new BytesRef(sVals[j]);
                    }
                }
                for (BytesRef value : vals) {
                    if (cmpValue == null) {
                        cmpValue = value;
                    } else if (sortMode == MultiValueMode.MIN && value.compareTo(cmpValue) < 0) {
                        cmpValue = value;
                    } else if (sortMode == MultiValueMode.MAX && value.compareTo(cmpValue) > 0) {
                        cmpValue = value;
                    }
                }
            }
            if (cmpValue == null) {
                // No valued children: substitute the missing value. "_last" stays null so
                // the ordering check below is skipped for such parents.
                if ("_first".equals(missingValue)) {
                    cmpValue = new BytesRef();
                } else if ("_last".equals(missingValue) == false) {
                    cmpValue = (BytesRef) missingValue;
                }
            }
            if (previous != null && cmpValue != null) {
                assertTrue(previous.utf8ToString() + " / " + cmpValue.utf8ToString(), previous.compareTo(cmpValue) <= 0);
            }
            previous = cmpValue;
        }
        searcher.getIndexReader().close();
    }
public void testSingleValuedGlobalOrdinals() throws Exception {
Document d = new Document();
addField(d, "_id", "1");
addField(d, "value", "2");
writer.addDocument(d);
d = new Document();
addField(d, "_id", "2");
addField(d, "value", "1");
writer.addDocument(d);
// Force a second segment
writer.commit();
d = new Document();
addField(d, "_id", "3");
addField(d, "value", "3");
writer.addDocument(d);
refreshReader();
IndexOrdinalsFieldData ifd = getForField("string", "value", hasDocValues());
IndexOrdinalsFieldData globalOrdinals = ifd.loadGlobal(topLevelReader);
assertNotNull(globalOrdinals.getOrdinalMap());
assertThat(topLevelReader.leaves().size(), equalTo(2));
for (int l = 0; l < 2; l++) {
SortedSetDocValues ords = globalOrdinals.load(topLevelReader.leaves().get(l)).getOrdinalsValues();
assertThat(DocValues.unwrapSingleton(ords), instanceOf(SortedDocValues.class));
}
}
    /**
     * Builds global ordinals over a three-segment index (populated by
     * {@code fillExtendedMvSet()}) and verifies, segment by segment, that each
     * document's per-segment ordinals map to the expected global ordinal and term.
     * The expected ordinals/terms are hard-coded against the fixture data.
     */
    public void testGlobalOrdinals() throws Exception {
        fillExtendedMvSet();
        refreshReader();
        IndexOrdinalsFieldData ifd = getForField("string", "value", hasDocValues());
        IndexOrdinalsFieldData globalOrdinals = ifd.loadGlobal(topLevelReader);
        assertNotNull(globalOrdinals.getOrdinalMap());
        assertThat(topLevelReader.leaves().size(), equalTo(3));
        // First segment
        assertThat(globalOrdinals, instanceOf(GlobalOrdinalsIndexFieldData.Consumer.class));
        LeafReaderContext leaf = topLevelReader.leaves().get(0);
        LeafOrdinalsFieldData afd = globalOrdinals.load(leaf);
        SortedSetDocValues values = afd.getOrdinalsValues();
        assertTrue(values.advanceExact(0));
        long ord = values.nextOrd();
        assertThat(ord, equalTo(3L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("02"));
        ord = values.nextOrd();
        assertThat(ord, equalTo(5L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("04"));
        // doc 1 has no value in this segment
        assertFalse(values.advanceExact(1));
        assertTrue(values.advanceExact(2));
        ord = values.nextOrd();
        assertThat(ord, equalTo(4L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("03"));
        // Second segment
        leaf = topLevelReader.leaves().get(1);
        afd = globalOrdinals.load(leaf);
        values = afd.getOrdinalsValues();
        assertTrue(values.advanceExact(0));
        ord = values.nextOrd();
        assertThat(ord, equalTo(5L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("04"));
        ord = values.nextOrd();
        assertThat(ord, equalTo(6L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("05"));
        ord = values.nextOrd();
        assertThat(ord, equalTo(7L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("06"));
        assertTrue(values.advanceExact(1));
        ord = values.nextOrd();
        assertThat(ord, equalTo(7L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("06"));
        ord = values.nextOrd();
        assertThat(ord, equalTo(8L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("07"));
        ord = values.nextOrd();
        assertThat(ord, equalTo(9L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("08"));
        // doc 2 has no value in this segment
        assertFalse(values.advanceExact(2));
        assertTrue(values.advanceExact(3));
        ord = values.nextOrd();
        assertThat(ord, equalTo(9L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("08"));
        ord = values.nextOrd();
        assertThat(ord, equalTo(10L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("09"));
        ord = values.nextOrd();
        assertThat(ord, equalTo(11L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("10"));
        // Third segment
        leaf = topLevelReader.leaves().get(2);
        afd = globalOrdinals.load(leaf);
        values = afd.getOrdinalsValues();
        assertTrue(values.advanceExact(0));
        ord = values.nextOrd();
        assertThat(ord, equalTo(0L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("!08"));
        ord = values.nextOrd();
        assertThat(ord, equalTo(1L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("!09"));
        ord = values.nextOrd();
        assertThat(ord, equalTo(2L));
        assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("!10"));
    }
public void testTermsEnum() throws Exception {
fillExtendedMvSet();
writer.forceMerge(1);
List<LeafReaderContext> atomicReaderContexts = refreshReader();
IndexOrdinalsFieldData ifd = getForField("value");
for (LeafReaderContext atomicReaderContext : atomicReaderContexts) {
LeafOrdinalsFieldData afd = ifd.load(atomicReaderContext);
TermsEnum termsEnum = afd.getOrdinalsValues().termsEnum();
int size = 0;
while (termsEnum.next() != null) {
size++;
}
assertThat(size, equalTo(12));
assertThat(termsEnum.seekExact(new BytesRef("10")), is(true));
assertThat(termsEnum.term().utf8ToString(), equalTo("10"));
assertThat(termsEnum.next(), nullValue());
assertThat(termsEnum.seekExact(new BytesRef("08")), is(true));
assertThat(termsEnum.term().utf8ToString(), equalTo("08"));
size = 0;
while (termsEnum.next() != null) {
size++;
}
assertThat(size, equalTo(2));
termsEnum.seekExact(8);
assertThat(termsEnum.term().utf8ToString(), equalTo("07"));
size = 0;
while (termsEnum.next() != null) {
size++;
}
assertThat(size, equalTo(3));
}
}
    /**
     * Verifies the field-data cache lifecycle for global ordinals: repeated loads share
     * the same ordinal map, the cache holds the expected number of entries (which differs
     * depending on whether doc values are used), top-level entries are evicted when the
     * top-level reader closes, and a new reader gets a fresh global-ordinals instance.
     */
    public void testGlobalOrdinalsGetRemovedOnceIndexReaderCloses() throws Exception {
        fillExtendedMvSet();
        refreshReader();
        IndexOrdinalsFieldData ifd = getForField("string", "value", hasDocValues());
        IndexOrdinalsFieldData globalOrdinals = ifd.loadGlobal(topLevelReader);
        assertNotNull(globalOrdinals.getOrdinalMap());
        // A second load must reuse the cached ordinal map.
        assertThat(ifd.loadGlobal(topLevelReader).getOrdinalMap(), sameInstance(globalOrdinals.getOrdinalMap()));
        // 3 b/c 1 segment level caches and 1 top level cache
        // in case of doc values, we don't cache atomic FD, so only the top-level cache is there
        assertThat(indicesFieldDataCache.getCache().weight(), equalTo(hasDocValues() ? 1L : 4L));
        // Find the cached global-ordinals entry and check it shares the ordinal map.
        IndexOrdinalsFieldData cachedInstance = null;
        for (Accountable ramUsage : indicesFieldDataCache.getCache().values()) {
            if (ramUsage instanceof IndexOrdinalsFieldData) {
                cachedInstance = (IndexOrdinalsFieldData) ramUsage;
                break;
            }
        }
        assertNotSame(cachedInstance, globalOrdinals);
        assertThat(cachedInstance.getOrdinalMap(), sameInstance(globalOrdinals.getOrdinalMap()));
        topLevelReader.close();
        // Now only 3 segment level entries, only the toplevel reader has been closed, but the segment readers are still used by IW
        assertThat(indicesFieldDataCache.getCache().weight(), equalTo(hasDocValues() ? 0L : 3L));
        refreshReader();
        // A fresh top-level reader must get a new global-ordinals instance.
        assertThat(ifd.loadGlobal(topLevelReader), not(sameInstance(globalOrdinals)));
        indexService.clearCaches(false, true);
        assertThat(indicesFieldDataCache.getCache().weight(), equalTo(0L));
    }
}
| AbstractStringFieldDataTestCase |
java | quarkusio__quarkus | integration-tests/smallrye-context-propagation/src/test/java/io/quarkus/context/test/customContext/CustomContextTest.java | {
"start": 645,
"end": 1589
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(CustomContextTest.class, CustomContext.class, CustomContextProvider.class)
.addAsServiceProvider(ThreadContextProvider.class, CustomContextProvider.class));
@Inject
ThreadContext tc;
@Test
public void testCustomContextPropagation() throws Exception {
ExecutorService executor = Executors.newSingleThreadExecutor();
// set something to custom context
CustomContext.set("foo");
CompletableFuture<String> ret = tc.withContextCapture(CompletableFuture.completedFuture("void"));
CompletableFuture<Void> cfs = ret.thenApplyAsync(text -> {
Assertions.assertEquals("foo", CustomContext.get());
return null;
}, executor);
cfs.get();
}
}
| CustomContextTest |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/http/codec/json/CustomizedJackson2JsonEncoderTests.java | {
"start": 3263,
"end": 3576
} | class ____ extends Jackson2JsonEncoder {
@Override
protected ObjectWriter customizeWriter(
ObjectWriter writer, MimeType mimeType, ResolvableType elementType, Map<String, Object> hints) {
return writer.with(SerializationFeature.WRITE_ENUMS_USING_TO_STRING);
}
}
}
| Jackson2JsonEncoderWithCustomization |
java | google__dagger | javatests/dagger/android/processor/ContributesAndroidInjectorTest.java | {
"start": 4484,
"end": 5278
} | class ____ {",
" @ContributesAndroidInjector",
" abstract <T> ParameterizedActivity<T> test();",
"}");
compile(module, TEST_ACTIVITY, parameterizedActivity)
.compile(
subject -> {
subject.compilationDidFail();
subject
.hasErrorContaining("cannot return parameterized types")
.onLineContaining("test()");
});
}
@Test
public void moduleIsntModule() {
Source module =
CompilerTests.javaSource(
"test.TestModule",
"package test;",
"",
"import dagger.Module;",
"import dagger.android.ContributesAndroidInjector;",
"",
"@Module",
"abstract | TestModule |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/api/RecursiveAssertionAssert.java | {
"start": 11882,
"end": 13264
} | class ____ {
* int number;
* String street;
* }
*
* Person sherlock = new Person("Sherlock", "Detective");
* sherlock.address.street = "Baker Street";
* sherlock.address.number = 221;
*
* // assertion succeeds because Person has only String fields except for address (address fields are ignored)
* assertThat(sherlock).usingRecursiveAssertion()
* .ignoringFieldsOfTypes(Address.class)
* .allFieldsSatisfy(field -> field instanceof String);
*
* // assertion fails because of address and address.number fields
* assertThat(sherlock).usingRecursiveAssertion()
* .allFieldsSatisfy(field -> field instanceof String);</code></pre>
*
* @param typesToIgnore the types we want to ignore in the object under test fields.
* @return this {@link RecursiveAssertionAssert} to chain other methods.
*/
public RecursiveAssertionAssert ignoringFieldsOfTypes(Class<?>... typesToIgnore) {
recursiveAssertionConfiguration.ignoreFieldsOfTypes(typesToIgnore);
return this;
}
/**
* Make the recursive assertion <b>not to run</b> the {@link Predicate} over the primitive fields of an object in an object graph,
* by default asserting over primitives is <em>enabled</em>.
* <p>
* For example, consider the following class:
* <pre><code class='java'> | Address |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/dynamic/output/CommandOutputResolverSupport.java | {
"start": 109,
"end": 234
} | class ____ {@link CommandOutputFactory} resolution such as {@link OutputRegistryCommandOutputFactoryResolver}.
* <p>
* This | for |
java | spring-projects__spring-boot | module/spring-boot-restdocs/src/main/java/org/springframework/boot/restdocs/test/autoconfigure/RestDocsMockMvcConfigurationCustomizer.java | {
"start": 1281,
"end": 1491
} | interface ____ {
/**
* Customize the given {@code configurer}.
* @param configurer the configurer
*/
void customize(MockMvcRestDocumentationConfigurer configurer);
}
| RestDocsMockMvcConfigurationCustomizer |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerHeartbeatResponse.java | {
"start": 1164,
"end": 1897
} | class ____
implements LocalizerHeartbeatResponse {
LocalizerAction action;
List<ResourceLocalizationSpec> resourceSpecs;
public MockLocalizerHeartbeatResponse(
LocalizerAction action, List<ResourceLocalizationSpec> resources) {
this.action = action;
this.resourceSpecs = resources;
}
public LocalizerAction getLocalizerAction() { return action; }
public void setLocalizerAction(LocalizerAction action) {
this.action = action;
}
@Override
public List<ResourceLocalizationSpec> getResourceSpecs() {
return resourceSpecs;
}
@Override
public void setResourceSpecs(List<ResourceLocalizationSpec> resourceSpecs) {
this.resourceSpecs = resourceSpecs;
}
}
| MockLocalizerHeartbeatResponse |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java | {
"start": 459,
"end": 527
} | class ____ generated. Edit {@code X-State.java.st} instead.
*/
final | is |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.