language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/builder/BuilderErrorHandlingTest.java | {
"start": 466,
"end": 587
} | class ____ extends DatabindTestUtil
{
@JsonDeserialize(builder=SimpleBuilderXY.class)
static | BuilderErrorHandlingTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java | {
"start": 7651,
"end": 9581
} | class ____. */
private static final Logger LOG = LoggerFactory.getLogger(Task.class);
/** The thread group that contains all task threads. */
private static final ThreadGroup TASK_THREADS_GROUP = new ThreadGroup("Flink Task Threads");
/** For atomic state updates. */
private static final AtomicReferenceFieldUpdater<Task, ExecutionState> STATE_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(
Task.class, ExecutionState.class, "executionState");
// ------------------------------------------------------------------------
// Constant fields that are part of the initial Task construction
// ------------------------------------------------------------------------
/** The job that the task belongs to. */
private final JobID jobId;
/** The type of this job. */
private final JobType jobType;
/** The vertex in the JobGraph whose code the task executes. */
private final JobVertexID vertexId;
/** The execution attempt of the parallel subtask. */
private final ExecutionAttemptID executionId;
/** ID which identifies the slot in which the task is supposed to run. */
private final AllocationID allocationId;
/** The meta information of current job. */
private final JobInfo jobInfo;
/** The meta information of current task. */
private final TaskInfo taskInfo;
/** The name of the task, including subtask indexes. */
private final String taskNameWithSubtask;
/** The job-wide configuration object. */
private final Configuration jobConfiguration;
/** The task-specific configuration. */
private final Configuration taskConfiguration;
/** The jar files used by this task. */
private final Collection<PermanentBlobKey> requiredJarFiles;
/** The classpaths used by this task. */
private final Collection<URL> requiredClasspaths;
/** The name of the | logger |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/params/ParameterizedInvocationNameFormatterTests.java | {
"start": 12033,
"end": 16349
} | class ____ {
@ParameterizedTest
@CsvSource(delimiterString = "->", textBlock = """
'Jane Smith' -> 'Jane Smith'
\\ -> \\\\
" -> \\"
# The following represents a single ' enclosed in ''.
'''' -> ''''
'\n' -> \\n
'\r\n' -> \\r\\n
' \t ' -> ' \\t '
'\b' -> \\b
'\f' -> \\f
'\u0007' -> '\u0007'
""")
void quotedStrings(String argument, String expected) {
var formatter = formatter(DEFAULT_DISPLAY_NAME, "IGNORED");
var formattedName = format(formatter, 1, arguments(argument));
assertThat(formattedName).isEqualTo("[1] " + '"' + expected + '"');
}
@ParameterizedTest
@CsvSource(quoteCharacter = '"', delimiterString = "->", textBlock = """
X -> X
\\ -> \\\\
' -> \\'
# The following represents a single " enclosed in "". The escaping is
# necessary, because three " characters in a row close the text block.
\"""\" -> \"""\"
"\n" -> \\n
"\r" -> \\r
"\t" -> \\t
"\b" -> \\b
"\f" -> \\f
"\u0007" -> "\u0007"
""")
void quotedCharacters(char argument, String expected) {
var formatter = formatter(DEFAULT_DISPLAY_NAME, "IGNORED");
var formattedName = format(formatter, 1, arguments(argument));
assertThat(formattedName).isEqualTo("[1] " + "'" + expected + "'");
}
@Test
void quotedStringsForArgumentsWithNames() {
var testMethod = ParameterizedTestCases.getMethod("processFruit", String.class, int.class);
var formatter = formatter(DEFAULT_DISPLAY_NAME, "IGNORED", testMethod);
var name1 = format(formatter, 1, arguments("apple", 42));
var name2 = format(formatter, 2, arguments("banana", 99));
assertThat(name1).isEqualTo("[1] fruit = \"apple\", ranking = 42");
assertThat(name2).isEqualTo("[2] fruit = \"banana\", ranking = 99");
}
@Test
void quotedStringsForArgumentsWithNamesAndNamedArguments() {
var testMethod = ParameterizedTestCases.getMethod("processFruit", String.class, int.class);
var formatter = formatter(DEFAULT_DISPLAY_NAME, "IGNORED", testMethod);
var name1 = format(formatter, 1, arguments(named("Apple", "apple"), 42));
var name2 = format(formatter, 2, arguments(named("Banana", "banana"), 99));
assertThat(name1).isEqualTo("[1] fruit = Apple, ranking = 42");
assertThat(name2).isEqualTo("[2] fruit = Banana, ranking = 99");
}
@Test
void quotedStringsForArgumentsWithNamesAndParameterNameAndArgument() {
var testMethod = ParameterizedTestCases.getMethod("processFruit", String.class, int.class);
var formatter = formatter(DEFAULT_DISPLAY_NAME, "IGNORED", testMethod);
var name1 = format(formatter, 1, arguments(new ParameterNameAndArgument("FRUIT", "apple"), 42));
var name2 = format(formatter, 2, arguments(new ParameterNameAndArgument("FRUCHT", "Banane"), 99));
assertThat(name1).isEqualTo("[1] FRUIT = \"apple\", ranking = 42");
assertThat(name2).isEqualTo("[2] FRUCHT = \"Banane\", ranking = 99");
}
}
// -------------------------------------------------------------------------
private static ParameterizedInvocationNameFormatter formatter(String pattern, String displayName) {
return formatter(pattern, displayName, 512);
}
private static ParameterizedInvocationNameFormatter formatter(String pattern, String displayName,
int argumentMaxLength) {
ParameterizedDeclarationContext<?> context = mock();
when(context.getResolverFacade()).thenReturn(mock());
when(context.getAnnotationName()).thenReturn(ParameterizedTest.class.getSimpleName());
return new ParameterizedInvocationNameFormatter(pattern, displayName, context, argumentMaxLength);
}
private static ParameterizedInvocationNameFormatter formatter(String pattern, String displayName, Method method) {
var context = new ParameterizedTestContext(method.getDeclaringClass(), method,
method.getAnnotation(ParameterizedTest.class));
return new ParameterizedInvocationNameFormatter(pattern, displayName, context, 512);
}
private static String format(ParameterizedInvocationNameFormatter formatter, int invocationIndex,
Arguments arguments) {
return formatter.format(invocationIndex, EvaluatedArgumentSet.allOf(arguments), true);
}
@NullUnmarked
private static | QuotedTextTests |
java | quarkusio__quarkus | extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/runtime/attribute/VertxMDCDataAttribute.java | {
"start": 310,
"end": 852
} | class ____ implements ExchangeAttribute {
private final String dataKey;
public VertxMDCDataAttribute(String dataKey) {
this.dataKey = dataKey;
}
@Override
public String readAttribute(RoutingContext exchange) {
VertxMDC mdc = VertxMDC.INSTANCE;
return mdc.get(dataKey);
}
@Override
public void writeAttribute(RoutingContext exchange, String newValue) throws ReadOnlyAttributeException {
throw new ReadOnlyAttributeException();
}
public static final | VertxMDCDataAttribute |
java | apache__rocketmq | common/src/main/java/org/apache/rocketmq/common/compression/CompressorFactory.java | {
"start": 886,
"end": 1387
} | class ____ {
private static final EnumMap<CompressionType, Compressor> COMPRESSORS;
static {
COMPRESSORS = new EnumMap<>(CompressionType.class);
COMPRESSORS.put(CompressionType.LZ4, new Lz4Compressor());
COMPRESSORS.put(CompressionType.ZSTD, new ZstdCompressor());
COMPRESSORS.put(CompressionType.ZLIB, new ZlibCompressor());
}
public static Compressor getCompressor(CompressionType type) {
return COMPRESSORS.get(type);
}
}
| CompressorFactory |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/Assertions_assertThat_with_Spliterator_Test.java | {
"start": 917,
"end": 1169
} | class ____ {
@Test
void should_create_Assert() {
// WHEN
SpliteratorAssert<String> assertion = assertThat(Stream.of("Luke", "Leia").spliterator());
// THEN
then(assertion).isNotNull();
}
}
| Assertions_assertThat_with_Spliterator_Test |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/jdbc/Sql.java | {
"start": 1056,
"end": 1705
} | class ____ test method to configure
* SQL {@link #scripts} and {@link #statements} to be executed against a given
* database during integration tests.
*
* <p>Method-level declarations override class-level declarations by default,
* but this behavior can be configured via {@link SqlMergeMode @SqlMergeMode}.
* However, this does not apply to class-level declarations configured for the
* {@link ExecutionPhase#BEFORE_TEST_CLASS BEFORE_TEST_CLASS} or
* {@link ExecutionPhase#AFTER_TEST_CLASS AFTER_TEST_CLASS} execution phase. Such
* declarations cannot be overridden, and the corresponding scripts and statements
* will be executed once per | or |
java | apache__spark | common/kvstore/src/main/java/org/apache/spark/util/kvstore/ArrayWrappers.java | {
"start": 1474,
"end": 2175
} | class ____ {
@SuppressWarnings("unchecked")
public static Comparable<Object> forArray(Object a) {
JavaUtils.checkArgument(a.getClass().isArray(), "Input should be an array");
Comparable<?> ret;
if (a instanceof int[] ia) {
ret = new ComparableIntArray(ia);
} else if (a instanceof long[] la) {
ret = new ComparableLongArray(la);
} else if (a instanceof byte[] ba) {
ret = new ComparableByteArray(ba);
} else {
JavaUtils.checkArgument(!a.getClass().getComponentType().isPrimitive(),
"Array element is primitive");
ret = new ComparableObjectArray((Object[]) a);
}
return (Comparable<Object>) ret;
}
private static | ArrayWrappers |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java | {
"start": 1491,
"end": 16372
} | class ____ implements GroupingAggregatorFunction {
private static final List<IntermediateStateDesc> INTERMEDIATE_STATE_DESC = List.of(
new IntermediateStateDesc("xVal", ElementType.DOUBLE),
new IntermediateStateDesc("xDel", ElementType.DOUBLE),
new IntermediateStateDesc("yVal", ElementType.DOUBLE),
new IntermediateStateDesc("yDel", ElementType.DOUBLE),
new IntermediateStateDesc("count", ElementType.LONG) );
private final CentroidPointAggregator.GroupingCentroidState state;
private final List<Integer> channels;
private final DriverContext driverContext;
public SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction(List<Integer> channels,
CentroidPointAggregator.GroupingCentroidState state, DriverContext driverContext) {
this.channels = channels;
this.state = state;
this.driverContext = driverContext;
}
public static SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction create(
List<Integer> channels, DriverContext driverContext) {
return new SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction(channels, SpatialCentroidCartesianPointDocValuesAggregator.initGrouping(driverContext.bigArrays()), driverContext);
}
public static List<IntermediateStateDesc> intermediateStateDesc() {
return INTERMEDIATE_STATE_DESC;
}
@Override
public int intermediateBlockCount() {
return INTERMEDIATE_STATE_DESC.size();
}
@Override
public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds,
Page page) {
LongBlock encodedBlock = page.getBlock(channels.get(0));
LongVector encodedVector = encodedBlock.asVector();
if (encodedVector == null) {
maybeEnableGroupIdTracking(seenGroupIds, encodedBlock);
return new GroupingAggregatorFunction.AddInput() {
@Override
public void add(int positionOffset, IntArrayBlock groupIds) {
addRawInput(positionOffset, groupIds, encodedBlock);
}
@Override
public void add(int positionOffset, IntBigArrayBlock groupIds) {
addRawInput(positionOffset, groupIds, encodedBlock);
}
@Override
public void add(int positionOffset, IntVector groupIds) {
addRawInput(positionOffset, groupIds, encodedBlock);
}
@Override
public void close() {
}
};
}
return new GroupingAggregatorFunction.AddInput() {
@Override
public void add(int positionOffset, IntArrayBlock groupIds) {
addRawInput(positionOffset, groupIds, encodedVector);
}
@Override
public void add(int positionOffset, IntBigArrayBlock groupIds) {
addRawInput(positionOffset, groupIds, encodedVector);
}
@Override
public void add(int positionOffset, IntVector groupIds) {
addRawInput(positionOffset, groupIds, encodedVector);
}
@Override
public void close() {
}
};
}
private void addRawInput(int positionOffset, IntArrayBlock groups, LongBlock encodedBlock) {
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
if (groups.isNull(groupPosition)) {
continue;
}
int valuesPosition = groupPosition + positionOffset;
if (encodedBlock.isNull(valuesPosition)) {
continue;
}
int groupStart = groups.getFirstValueIndex(groupPosition);
int groupEnd = groupStart + groups.getValueCount(groupPosition);
for (int g = groupStart; g < groupEnd; g++) {
int groupId = groups.getInt(g);
int encodedStart = encodedBlock.getFirstValueIndex(valuesPosition);
int encodedEnd = encodedStart + encodedBlock.getValueCount(valuesPosition);
for (int encodedOffset = encodedStart; encodedOffset < encodedEnd; encodedOffset++) {
long encodedValue = encodedBlock.getLong(encodedOffset);
SpatialCentroidCartesianPointDocValuesAggregator.combine(state, groupId, encodedValue);
}
}
}
}
private void addRawInput(int positionOffset, IntArrayBlock groups, LongVector encodedVector) {
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
if (groups.isNull(groupPosition)) {
continue;
}
int valuesPosition = groupPosition + positionOffset;
int groupStart = groups.getFirstValueIndex(groupPosition);
int groupEnd = groupStart + groups.getValueCount(groupPosition);
for (int g = groupStart; g < groupEnd; g++) {
int groupId = groups.getInt(g);
long encodedValue = encodedVector.getLong(valuesPosition);
SpatialCentroidCartesianPointDocValuesAggregator.combine(state, groupId, encodedValue);
}
}
}
@Override
public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) {
state.enableGroupIdTracking(new SeenGroupIds.Empty());
assert channels.size() == intermediateBlockCount();
Block xValUncast = page.getBlock(channels.get(0));
if (xValUncast.areAllValuesNull()) {
return;
}
DoubleVector xVal = ((DoubleBlock) xValUncast).asVector();
Block xDelUncast = page.getBlock(channels.get(1));
if (xDelUncast.areAllValuesNull()) {
return;
}
DoubleVector xDel = ((DoubleBlock) xDelUncast).asVector();
Block yValUncast = page.getBlock(channels.get(2));
if (yValUncast.areAllValuesNull()) {
return;
}
DoubleVector yVal = ((DoubleBlock) yValUncast).asVector();
Block yDelUncast = page.getBlock(channels.get(3));
if (yDelUncast.areAllValuesNull()) {
return;
}
DoubleVector yDel = ((DoubleBlock) yDelUncast).asVector();
Block countUncast = page.getBlock(channels.get(4));
if (countUncast.areAllValuesNull()) {
return;
}
LongVector count = ((LongBlock) countUncast).asVector();
assert xVal.getPositionCount() == xDel.getPositionCount() && xVal.getPositionCount() == yVal.getPositionCount() && xVal.getPositionCount() == yDel.getPositionCount() && xVal.getPositionCount() == count.getPositionCount();
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
if (groups.isNull(groupPosition)) {
continue;
}
int groupStart = groups.getFirstValueIndex(groupPosition);
int groupEnd = groupStart + groups.getValueCount(groupPosition);
for (int g = groupStart; g < groupEnd; g++) {
int groupId = groups.getInt(g);
int valuesPosition = groupPosition + positionOffset;
SpatialCentroidCartesianPointDocValuesAggregator.combineIntermediate(state, groupId, xVal.getDouble(valuesPosition), xDel.getDouble(valuesPosition), yVal.getDouble(valuesPosition), yDel.getDouble(valuesPosition), count.getLong(valuesPosition));
}
}
}
private void addRawInput(int positionOffset, IntBigArrayBlock groups, LongBlock encodedBlock) {
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
if (groups.isNull(groupPosition)) {
continue;
}
int valuesPosition = groupPosition + positionOffset;
if (encodedBlock.isNull(valuesPosition)) {
continue;
}
int groupStart = groups.getFirstValueIndex(groupPosition);
int groupEnd = groupStart + groups.getValueCount(groupPosition);
for (int g = groupStart; g < groupEnd; g++) {
int groupId = groups.getInt(g);
int encodedStart = encodedBlock.getFirstValueIndex(valuesPosition);
int encodedEnd = encodedStart + encodedBlock.getValueCount(valuesPosition);
for (int encodedOffset = encodedStart; encodedOffset < encodedEnd; encodedOffset++) {
long encodedValue = encodedBlock.getLong(encodedOffset);
SpatialCentroidCartesianPointDocValuesAggregator.combine(state, groupId, encodedValue);
}
}
}
}
private void addRawInput(int positionOffset, IntBigArrayBlock groups, LongVector encodedVector) {
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
if (groups.isNull(groupPosition)) {
continue;
}
int valuesPosition = groupPosition + positionOffset;
int groupStart = groups.getFirstValueIndex(groupPosition);
int groupEnd = groupStart + groups.getValueCount(groupPosition);
for (int g = groupStart; g < groupEnd; g++) {
int groupId = groups.getInt(g);
long encodedValue = encodedVector.getLong(valuesPosition);
SpatialCentroidCartesianPointDocValuesAggregator.combine(state, groupId, encodedValue);
}
}
}
@Override
public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) {
state.enableGroupIdTracking(new SeenGroupIds.Empty());
assert channels.size() == intermediateBlockCount();
Block xValUncast = page.getBlock(channels.get(0));
if (xValUncast.areAllValuesNull()) {
return;
}
DoubleVector xVal = ((DoubleBlock) xValUncast).asVector();
Block xDelUncast = page.getBlock(channels.get(1));
if (xDelUncast.areAllValuesNull()) {
return;
}
DoubleVector xDel = ((DoubleBlock) xDelUncast).asVector();
Block yValUncast = page.getBlock(channels.get(2));
if (yValUncast.areAllValuesNull()) {
return;
}
DoubleVector yVal = ((DoubleBlock) yValUncast).asVector();
Block yDelUncast = page.getBlock(channels.get(3));
if (yDelUncast.areAllValuesNull()) {
return;
}
DoubleVector yDel = ((DoubleBlock) yDelUncast).asVector();
Block countUncast = page.getBlock(channels.get(4));
if (countUncast.areAllValuesNull()) {
return;
}
LongVector count = ((LongBlock) countUncast).asVector();
assert xVal.getPositionCount() == xDel.getPositionCount() && xVal.getPositionCount() == yVal.getPositionCount() && xVal.getPositionCount() == yDel.getPositionCount() && xVal.getPositionCount() == count.getPositionCount();
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
if (groups.isNull(groupPosition)) {
continue;
}
int groupStart = groups.getFirstValueIndex(groupPosition);
int groupEnd = groupStart + groups.getValueCount(groupPosition);
for (int g = groupStart; g < groupEnd; g++) {
int groupId = groups.getInt(g);
int valuesPosition = groupPosition + positionOffset;
SpatialCentroidCartesianPointDocValuesAggregator.combineIntermediate(state, groupId, xVal.getDouble(valuesPosition), xDel.getDouble(valuesPosition), yVal.getDouble(valuesPosition), yDel.getDouble(valuesPosition), count.getLong(valuesPosition));
}
}
}
private void addRawInput(int positionOffset, IntVector groups, LongBlock encodedBlock) {
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
int valuesPosition = groupPosition + positionOffset;
if (encodedBlock.isNull(valuesPosition)) {
continue;
}
int groupId = groups.getInt(groupPosition);
int encodedStart = encodedBlock.getFirstValueIndex(valuesPosition);
int encodedEnd = encodedStart + encodedBlock.getValueCount(valuesPosition);
for (int encodedOffset = encodedStart; encodedOffset < encodedEnd; encodedOffset++) {
long encodedValue = encodedBlock.getLong(encodedOffset);
SpatialCentroidCartesianPointDocValuesAggregator.combine(state, groupId, encodedValue);
}
}
}
private void addRawInput(int positionOffset, IntVector groups, LongVector encodedVector) {
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
int valuesPosition = groupPosition + positionOffset;
int groupId = groups.getInt(groupPosition);
long encodedValue = encodedVector.getLong(valuesPosition);
SpatialCentroidCartesianPointDocValuesAggregator.combine(state, groupId, encodedValue);
}
}
@Override
public void addIntermediateInput(int positionOffset, IntVector groups, Page page) {
state.enableGroupIdTracking(new SeenGroupIds.Empty());
assert channels.size() == intermediateBlockCount();
Block xValUncast = page.getBlock(channels.get(0));
if (xValUncast.areAllValuesNull()) {
return;
}
DoubleVector xVal = ((DoubleBlock) xValUncast).asVector();
Block xDelUncast = page.getBlock(channels.get(1));
if (xDelUncast.areAllValuesNull()) {
return;
}
DoubleVector xDel = ((DoubleBlock) xDelUncast).asVector();
Block yValUncast = page.getBlock(channels.get(2));
if (yValUncast.areAllValuesNull()) {
return;
}
DoubleVector yVal = ((DoubleBlock) yValUncast).asVector();
Block yDelUncast = page.getBlock(channels.get(3));
if (yDelUncast.areAllValuesNull()) {
return;
}
DoubleVector yDel = ((DoubleBlock) yDelUncast).asVector();
Block countUncast = page.getBlock(channels.get(4));
if (countUncast.areAllValuesNull()) {
return;
}
LongVector count = ((LongBlock) countUncast).asVector();
assert xVal.getPositionCount() == xDel.getPositionCount() && xVal.getPositionCount() == yVal.getPositionCount() && xVal.getPositionCount() == yDel.getPositionCount() && xVal.getPositionCount() == count.getPositionCount();
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
int groupId = groups.getInt(groupPosition);
int valuesPosition = groupPosition + positionOffset;
SpatialCentroidCartesianPointDocValuesAggregator.combineIntermediate(state, groupId, xVal.getDouble(valuesPosition), xDel.getDouble(valuesPosition), yVal.getDouble(valuesPosition), yDel.getDouble(valuesPosition), count.getLong(valuesPosition));
}
}
private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, LongBlock encodedBlock) {
if (encodedBlock.mayHaveNulls()) {
state.enableGroupIdTracking(seenGroupIds);
}
}
@Override
public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) {
state.enableGroupIdTracking(seenGroupIds);
}
@Override
public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) {
state.toIntermediate(blocks, offset, selected, driverContext);
}
@Override
public void evaluateFinal(Block[] blocks, int offset, IntVector selected,
GroupingAggregatorEvaluationContext ctx) {
blocks[offset] = SpatialCentroidCartesianPointDocValuesAggregator.evaluateFinal(state, selected, ctx);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(getClass().getSimpleName()).append("[");
sb.append("channels=").append(channels);
sb.append("]");
return sb.toString();
}
@Override
public void close() {
state.close();
}
}
| SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/bind/support/SessionStatus.java | {
"start": 687,
"end": 1293
} | interface ____ can be injected into handler methods, allowing them to
* signal that their session processing is complete. The handler invoker may
* then follow up with appropriate cleanup, for example, of session attributes which
* have been implicitly created during this handler's processing (according to
* the
* {@link org.springframework.web.bind.annotation.SessionAttributes @SessionAttributes}
* annotation).
*
* @author Juergen Hoeller
* @since 2.5
* @see org.springframework.web.bind.annotation.RequestMapping
* @see org.springframework.web.bind.annotation.SessionAttributes
*/
public | that |
java | apache__camel | components/camel-file/src/main/java/org/apache/camel/component/file/GenericFileProcessStrategy.java | {
"start": 953,
"end": 3749
} | interface ____<T> {
/**
* Allows custom logic to be run on first poll preparing the strategy, such as removing old lock files etc.
*
* @param operations file operations
* @param endpoint the endpoint
* @throws Exception can be thrown in case of errors which causes poll to fail
*/
void prepareOnStartup(GenericFileOperations<T> operations, GenericFileEndpoint<T> endpoint) throws Exception;
/**
* Called when work is about to begin on this file. This method may attempt to acquire some file lock before
* returning true; returning false if the file lock could not be obtained so that the file should be ignored.
*
* @param operations file operations
* @param endpoint the endpoint
* @param exchange the exchange
* @param file the file
* @return true if the file can be processed (such as if a file lock could be obtained)
* @throws Exception can be thrown in case of errors
*/
boolean begin(GenericFileOperations<T> operations, GenericFileEndpoint<T> endpoint, Exchange exchange, GenericFile<T> file)
throws Exception;
/**
* Called when a begin is aborted, for example to release any resources which may have been acquired during the
* {@link #begin(GenericFileOperations, GenericFileEndpoint, org.apache.camel.Exchange, GenericFile)} operation.
*
* @param operations file operations
* @param endpoint the endpoint
* @param exchange the exchange
* @param file the file
* @throws Exception can be thrown in case of errors
*/
void abort(GenericFileOperations<T> operations, GenericFileEndpoint<T> endpoint, Exchange exchange, GenericFile<T> file)
throws Exception;
/**
* Releases any file locks and possibly deletes or moves the file after successful processing
*
* @param operations file operations
* @param endpoint the endpoint
* @param exchange the exchange
* @param file the file
* @throws Exception can be thrown in case of errors
*/
void commit(GenericFileOperations<T> operations, GenericFileEndpoint<T> endpoint, Exchange exchange, GenericFile<T> file)
throws Exception;
/**
* Releases any file locks and possibly deletes or moves the file after unsuccessful processing
*
* @param operations file operations
* @param endpoint the endpoint
* @param exchange the exchange
* @param file the file
* @throws Exception can be thrown in case of errors
*/
void rollback(GenericFileOperations<T> operations, GenericFileEndpoint<T> endpoint, Exchange exchange, GenericFile<T> file)
throws Exception;
}
| GenericFileProcessStrategy |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/EnumSerializerUpgradeTest.java | {
"start": 1495,
"end": 2994
} | class ____ extends TypeSerializerUpgradeTestBase<TestEnum, TestEnum> {
private static final String SPEC_NAME = "enum-serializer";
public Collection<TestSpecification<?, ?>> createTestSpecifications(FlinkVersion flinkVersion)
throws Exception {
ArrayList<TestSpecification<?, ?>> testSpecifications = new ArrayList<>();
testSpecifications.add(
new TestSpecification<>(
SPEC_NAME,
flinkVersion,
EnumSerializerSetup.class,
EnumSerializerVerifier.class));
testSpecifications.add(
new TestSpecification<>(
SPEC_NAME + "reconfig",
flinkVersion,
EnumSerializerReconfigSetup.class,
EnumSerializerReconfigVerifier.class));
return testSpecifications;
}
private static Condition<? extends TypeSerializer<TestEnum>> enumSerializerWith(
final TestEnum[] expectedEnumValues) {
return new Condition<EnumSerializer<TestEnum>>(
value -> Arrays.equals(value.getValues(), expectedEnumValues), "");
}
// ----------------------------------------------------------------------------------------------
// Specification for "enum-serializer"
// ----------------------------------------------------------------------------------------------
/**
* This | EnumSerializerUpgradeTest |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/Aws2S3ComponentBuilderFactory.java | {
"start": 42854,
"end": 53078
} | class ____
extends AbstractComponentBuilder<AWS2S3Component>
implements Aws2S3ComponentBuilder {
@Override
protected AWS2S3Component buildConcreteComponent() {
return new AWS2S3Component();
}
private org.apache.camel.component.aws2.s3.AWS2S3Configuration getOrCreateConfiguration(AWS2S3Component component) {
if (component.getConfiguration() == null) {
component.setConfiguration(new org.apache.camel.component.aws2.s3.AWS2S3Configuration());
}
return component.getConfiguration();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "autoCreateBucket": getOrCreateConfiguration((AWS2S3Component) component).setAutoCreateBucket((boolean) value); return true;
case "configuration": ((AWS2S3Component) component).setConfiguration((org.apache.camel.component.aws2.s3.AWS2S3Configuration) value); return true;
case "delimiter": getOrCreateConfiguration((AWS2S3Component) component).setDelimiter((java.lang.String) value); return true;
case "forcePathStyle": getOrCreateConfiguration((AWS2S3Component) component).setForcePathStyle((boolean) value); return true;
case "ignoreBody": getOrCreateConfiguration((AWS2S3Component) component).setIgnoreBody((boolean) value); return true;
case "overrideEndpoint": getOrCreateConfiguration((AWS2S3Component) component).setOverrideEndpoint((boolean) value); return true;
case "pojoRequest": getOrCreateConfiguration((AWS2S3Component) component).setPojoRequest((boolean) value); return true;
case "policy": getOrCreateConfiguration((AWS2S3Component) component).setPolicy((java.lang.String) value); return true;
case "prefix": getOrCreateConfiguration((AWS2S3Component) component).setPrefix((java.lang.String) value); return true;
case "region": getOrCreateConfiguration((AWS2S3Component) component).setRegion((java.lang.String) value); return true;
case "uriEndpointOverride": getOrCreateConfiguration((AWS2S3Component) component).setUriEndpointOverride((java.lang.String) value); return true;
case "customerAlgorithm": getOrCreateConfiguration((AWS2S3Component) component).setCustomerAlgorithm((java.lang.String) value); return true;
case "customerKeyId": getOrCreateConfiguration((AWS2S3Component) component).setCustomerKeyId((java.lang.String) value); return true;
case "customerKeyMD5": getOrCreateConfiguration((AWS2S3Component) component).setCustomerKeyMD5((java.lang.String) value); return true;
case "bridgeErrorHandler": ((AWS2S3Component) component).setBridgeErrorHandler((boolean) value); return true;
case "deleteAfterRead": getOrCreateConfiguration((AWS2S3Component) component).setDeleteAfterRead((boolean) value); return true;
case "destinationBucket": getOrCreateConfiguration((AWS2S3Component) component).setDestinationBucket((java.lang.String) value); return true;
case "destinationBucketPrefix": getOrCreateConfiguration((AWS2S3Component) component).setDestinationBucketPrefix((java.lang.String) value); return true;
case "destinationBucketSuffix": getOrCreateConfiguration((AWS2S3Component) component).setDestinationBucketSuffix((java.lang.String) value); return true;
case "doneFileName": getOrCreateConfiguration((AWS2S3Component) component).setDoneFileName((java.lang.String) value); return true;
case "fileName": getOrCreateConfiguration((AWS2S3Component) component).setFileName((java.lang.String) value); return true;
case "includeBody": getOrCreateConfiguration((AWS2S3Component) component).setIncludeBody((boolean) value); return true;
case "includeFolders": getOrCreateConfiguration((AWS2S3Component) component).setIncludeFolders((boolean) value); return true;
case "moveAfterRead": getOrCreateConfiguration((AWS2S3Component) component).setMoveAfterRead((boolean) value); return true;
case "removePrefixOnMove": getOrCreateConfiguration((AWS2S3Component) component).setRemovePrefixOnMove((boolean) value); return true;
case "autocloseBody": getOrCreateConfiguration((AWS2S3Component) component).setAutocloseBody((boolean) value); return true;
case "batchMessageNumber": getOrCreateConfiguration((AWS2S3Component) component).setBatchMessageNumber((int) value); return true;
case "batchSize": getOrCreateConfiguration((AWS2S3Component) component).setBatchSize((int) value); return true;
case "bufferSize": getOrCreateConfiguration((AWS2S3Component) component).setBufferSize((int) value); return true;
case "deleteAfterWrite": getOrCreateConfiguration((AWS2S3Component) component).setDeleteAfterWrite((boolean) value); return true;
case "keyName": getOrCreateConfiguration((AWS2S3Component) component).setKeyName((java.lang.String) value); return true;
case "lazyStartProducer": ((AWS2S3Component) component).setLazyStartProducer((boolean) value); return true;
case "multiPartUpload": getOrCreateConfiguration((AWS2S3Component) component).setMultiPartUpload((boolean) value); return true;
case "namingStrategy": getOrCreateConfiguration((AWS2S3Component) component).setNamingStrategy((org.apache.camel.component.aws2.s3.stream.AWSS3NamingStrategyEnum) value); return true;
case "operation": getOrCreateConfiguration((AWS2S3Component) component).setOperation((org.apache.camel.component.aws2.s3.AWS2S3Operations) value); return true;
case "partSize": getOrCreateConfiguration((AWS2S3Component) component).setPartSize((long) value); return true;
case "restartingPolicy": getOrCreateConfiguration((AWS2S3Component) component).setRestartingPolicy((org.apache.camel.component.aws2.s3.stream.AWSS3RestartingPolicyEnum) value); return true;
case "storageClass": getOrCreateConfiguration((AWS2S3Component) component).setStorageClass((java.lang.String) value); return true;
case "streamingUploadMode": getOrCreateConfiguration((AWS2S3Component) component).setStreamingUploadMode((boolean) value); return true;
case "streamingUploadTimeout": getOrCreateConfiguration((AWS2S3Component) component).setStreamingUploadTimeout((long) value); return true;
case "timestampGroupingEnabled": getOrCreateConfiguration((AWS2S3Component) component).setTimestampGroupingEnabled((boolean) value); return true;
case "timestampHeaderName": getOrCreateConfiguration((AWS2S3Component) component).setTimestampHeaderName((java.lang.String) value); return true;
case "timestampWindowSizeMillis": getOrCreateConfiguration((AWS2S3Component) component).setTimestampWindowSizeMillis((long) value); return true;
case "awsKMSKeyId": getOrCreateConfiguration((AWS2S3Component) component).setAwsKMSKeyId((java.lang.String) value); return true;
case "conditionalWritesEnabled": getOrCreateConfiguration((AWS2S3Component) component).setConditionalWritesEnabled((boolean) value); return true;
case "useAwsKMS": getOrCreateConfiguration((AWS2S3Component) component).setUseAwsKMS((boolean) value); return true;
case "useCustomerKey": getOrCreateConfiguration((AWS2S3Component) component).setUseCustomerKey((boolean) value); return true;
case "useSSES3": getOrCreateConfiguration((AWS2S3Component) component).setUseSSES3((boolean) value); return true;
case "amazonS3Client": getOrCreateConfiguration((AWS2S3Component) component).setAmazonS3Client((software.amazon.awssdk.services.s3.S3Client) value); return true;
case "amazonS3Presigner": getOrCreateConfiguration((AWS2S3Component) component).setAmazonS3Presigner((software.amazon.awssdk.services.s3.presigner.S3Presigner) value); return true;
case "autowiredEnabled": ((AWS2S3Component) component).setAutowiredEnabled((boolean) value); return true;
case "healthCheckConsumerEnabled": ((AWS2S3Component) component).setHealthCheckConsumerEnabled((boolean) value); return true;
case "healthCheckProducerEnabled": ((AWS2S3Component) component).setHealthCheckProducerEnabled((boolean) value); return true;
case "proxyHost": getOrCreateConfiguration((AWS2S3Component) component).setProxyHost((java.lang.String) value); return true;
case "proxyPort": getOrCreateConfiguration((AWS2S3Component) component).setProxyPort((java.lang.Integer) value); return true;
case "proxyProtocol": getOrCreateConfiguration((AWS2S3Component) component).setProxyProtocol((software.amazon.awssdk.core.Protocol) value); return true;
case "accessKey": getOrCreateConfiguration((AWS2S3Component) component).setAccessKey((java.lang.String) value); return true;
case "profileCredentialsName": getOrCreateConfiguration((AWS2S3Component) component).setProfileCredentialsName((java.lang.String) value); return true;
case "secretKey": getOrCreateConfiguration((AWS2S3Component) component).setSecretKey((java.lang.String) value); return true;
case "sessionToken": getOrCreateConfiguration((AWS2S3Component) component).setSessionToken((java.lang.String) value); return true;
case "trustAllCertificates": getOrCreateConfiguration((AWS2S3Component) component).setTrustAllCertificates((boolean) value); return true;
case "useDefaultCredentialsProvider": getOrCreateConfiguration((AWS2S3Component) component).setUseDefaultCredentialsProvider((boolean) value); return true;
case "useProfileCredentialsProvider": getOrCreateConfiguration((AWS2S3Component) component).setUseProfileCredentialsProvider((boolean) value); return true;
case "useSessionCredentials": getOrCreateConfiguration((AWS2S3Component) component).setUseSessionCredentials((boolean) value); return true;
default: return false;
}
}
}
} | Aws2S3ComponentBuilderImpl |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-triple/src/test/java/org/apache/dubbo/rpc/protocol/tri/rest/GeneralTypeConverterTest.java | {
"start": 1247,
"end": 2136
} | class ____ {
private static final Logger logger = LoggerFactory.getLogger(GeneralTypeConverterTest.class);
public List<? extends Number>[] items;
@Test
void convert() throws NoSuchFieldException {
GeneralTypeConverter smartConverter = new GeneralTypeConverter(FrameworkModel.defaultModel());
smartConverter.convert(
"23,56", GeneralTypeConverterTest.class.getField("items").getGenericType());
}
@Test
void convert1() {
Object convert = JsonUtils.toJavaObject("[1,\"aa\"]", List.class);
Assertions.assertEquals(2, ((List) convert).size());
}
@Test
void convert2() throws NoSuchFieldException {
Class<?> type = TypeUtils.getActualType(
GeneralTypeConverterTest.class.getField("items").getGenericType());
logger.info(String.valueOf(type));
}
}
| GeneralTypeConverterTest |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-jackson/deployment/src/test/java/io/quarkus/resteasy/reactive/jackson/deployment/test/UnsecuredPet.java | {
"start": 70,
"end": 352
} | class ____ extends AbstractUnsecuredPet {
private Veterinarian veterinarian;
public Veterinarian getVeterinarian() {
return veterinarian;
}
public void setVeterinarian(Veterinarian veterinarian) {
this.veterinarian = veterinarian;
}
}
| UnsecuredPet |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/steps/MainClassBuildStep.java | {
"start": 37483,
"end": 38475
} | class ____ {
private final boolean isValid;
private final ClassVisitor classVisitor;
private Result(boolean isValid, ClassVisitor classVisitor) {
this.isValid = isValid;
this.classVisitor = classVisitor;
}
private static Result valid(ClassVisitor classVisitor) {
return new Result(true, classVisitor);
}
private static Result invalid(ClassVisitor classVisitor) {
return new Result(false, classVisitor);
}
private static Result invalid() {
return new Result(false, null);
}
}
}
@BuildStep
ReflectiveFieldBuildItem setupVersionField() {
return new ReflectiveFieldBuildItem(
"Ensure it's included in the executable to be able to grep the quarkus version",
Application.APP_CLASS_NAME, QUARKUS_ANALYTICS_QUARKUS_VERSION);
}
}
| Result |
java | apache__camel | components/camel-as2/camel-as2-api/src/main/java/org/apache/camel/component/as2/api/AS2MimeType.java | {
"start": 855,
"end": 2054
} | interface ____ {
/**
* Mime Type for Multipart Signed Data
*/
String MULTIPART_SIGNED = "multipart/signed";
/**
* Mime Type for Application PKCS7 Signature
*/
String APPLICATION_PKCS7_SIGNATURE = "application/pkcs7-signature";
/**
* Mime Type for Application PKCS7 Signature
*/
String APPLICATION_PKCS7_MIME = "application/pkcs7-mime";
/**
* Mime Type for Text/Plain Data
*/
String TEXT_PLAIN = "text/plain";
/**
* Mime Type for Application/EDIFACT
*/
String APPLICATION_EDIFACT = "application/edifact";
/**
* Mime Type for Application/XML
*/
String APPLICATION_XML = "application/xml";
/**
* Mime Type for Application/EDI-X12
*/
String APPLICATION_EDI_X12 = "application/edi-x12";
/**
* Mime Type for Application/EDI-consent
*/
String APPLICATION_EDI_CONSENT = "application/edi-consent";
/**
* Mime Type for Multipart/Report
*/
String MULTIPART_REPORT = "multipart/report";
/**
* Mime Type for Message/Disposition-Notification
*/
String MESSAGE_DISPOSITION_NOTIFICATION = "message/disposition-notification";
}
| AS2MimeType |
java | apache__flink | flink-table/flink-table-common/src/test/java/org/apache/flink/table/types/extraction/TypeInferenceExtractorTest.java | {
"start": 3358,
"end": 33199
} | class ____ {
private static Stream<TestSpec> testData() {
return Stream.concat(functionSpecs(), procedureSpecs());
}
private static Stream<TestSpec> functionSpecs() {
return Stream.of(
// function hint defines everything
TestSpec.forScalarFunction(FullFunctionHint.class)
.expectStaticArgument(StaticArgument.scalar("i", DataTypes.INT(), false))
.expectStaticArgument(StaticArgument.scalar("s", DataTypes.STRING(), false))
.expectOutput(TypeStrategies.explicit(DataTypes.BOOLEAN())),
// ---
// function hint defines everything with overloading
TestSpec.forScalarFunction(FullFunctionHints.class)
.expectOutputMapping(
InputTypeStrategies.sequence(
List.of("arg0"),
List.of(InputTypeStrategies.explicit(DataTypes.INT()))),
TypeStrategies.explicit(DataTypes.INT()))
.expectOutputMapping(
InputTypeStrategies.sequence(
List.of("arg0"),
List.of(InputTypeStrategies.explicit(DataTypes.BIGINT()))),
TypeStrategies.explicit(DataTypes.BIGINT())),
// ---
// global output hint with local input overloading
TestSpec.forScalarFunction(GlobalOutputFunctionHint.class)
.expectOutputMapping(
InputTypeStrategies.sequence(
List.of("arg0"),
List.of(InputTypeStrategies.explicit(DataTypes.INT()))),
TypeStrategies.explicit(DataTypes.INT()))
.expectOutputMapping(
InputTypeStrategies.sequence(
List.of("arg0"),
List.of(InputTypeStrategies.explicit(DataTypes.STRING()))),
TypeStrategies.explicit(DataTypes.INT())),
// ---
// unsupported output overloading
TestSpec.forScalarFunction(InvalidSingleOutputFunctionHint.class)
.expectErrorMessage(
"Function hints that lead to ambiguous results are not allowed."),
// ---
// global and local overloading
TestSpec.forScalarFunction(SplitFullFunctionHints.class)
.expectOutputMapping(
InputTypeStrategies.sequence(
List.of("arg0"),
List.of(InputTypeStrategies.explicit(DataTypes.INT()))),
TypeStrategies.explicit(DataTypes.INT()))
.expectOutputMapping(
InputTypeStrategies.sequence(
List.of("arg0"),
List.of(InputTypeStrategies.explicit(DataTypes.BIGINT()))),
TypeStrategies.explicit(DataTypes.BIGINT())),
// ---
// global and local overloading with unsupported output overloading
TestSpec.forScalarFunction(InvalidFullOutputFunctionHint.class)
.expectErrorMessage(
"Function hints with same input definition but different result types are not allowed."),
// ---
// ignore argument names during overloading
TestSpec.forScalarFunction(InvalidFullOutputFunctionWithArgNamesHint.class)
.expectErrorMessage(
"Function hints with same input definition but different result types are not allowed."),
// ---
// invalid data type hint
TestSpec.forScalarFunction(IncompleteFunctionHint.class)
.expectErrorMessage("Data type missing for scalar argument at position 1."),
// ---
// varargs and ANY input group
TestSpec.forScalarFunction(ComplexFunctionHint.class)
.expectOutputMapping(
InputTypeStrategies.varyingSequence(
new String[] {"myInt", "myAny"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(
DataTypes.ARRAY(DataTypes.INT())),
InputTypeStrategies.ANY
}),
TypeStrategies.explicit(DataTypes.BOOLEAN())),
// ---
// global input hints and local output hints
TestSpec.forScalarFunction(GlobalInputFunctionHints.class)
.expectOutputMapping(
InputTypeStrategies.sequence(
List.of("arg0"),
List.of(InputTypeStrategies.explicit(DataTypes.INT()))),
TypeStrategies.explicit(DataTypes.INT()))
.expectOutputMapping(
InputTypeStrategies.sequence(
List.of("arg0"),
List.of(InputTypeStrategies.explicit(DataTypes.BIGINT()))),
TypeStrategies.explicit(DataTypes.INT())),
// ---
// no arguments
TestSpec.forScalarFunction(ZeroArgFunction.class)
.expectEmptyStaticArguments()
.expectOutput(TypeStrategies.explicit(DataTypes.INT())),
// ---
// no arguments async
TestSpec.forAsyncScalarFunction(ZeroArgFunctionAsync.class)
.expectEmptyStaticArguments()
.expectOutput(TypeStrategies.explicit(DataTypes.INT())),
// ---
// no arguments async table
TestSpec.forAsyncTableFunction(ZeroArgFunctionAsyncTable.class)
.expectEmptyStaticArguments()
.expectOutput(TypeStrategies.explicit(DataTypes.INT())),
// ---
// test primitive arguments extraction
TestSpec.forScalarFunction(MixedArgFunction.class)
.expectStaticArgument(
StaticArgument.scalar(
"i", DataTypes.INT().notNull().bridgedTo(int.class), false))
.expectStaticArgument(StaticArgument.scalar("d", DataTypes.DOUBLE(), false))
.expectOutput(TypeStrategies.explicit(DataTypes.INT())),
// ---
// test primitive arguments extraction async
TestSpec.forAsyncScalarFunction(MixedArgFunctionAsync.class)
.expectStaticArgument(
StaticArgument.scalar(
"i", DataTypes.INT().notNull().bridgedTo(int.class), false))
.expectStaticArgument(StaticArgument.scalar("d", DataTypes.DOUBLE(), false))
.expectOutput(TypeStrategies.explicit(DataTypes.INT())),
// ---
// test primitive arguments extraction async table
TestSpec.forAsyncTableFunction(MixedArgFunctionAsyncTable.class)
.expectStaticArgument(
StaticArgument.scalar(
"i", DataTypes.INT().notNull().bridgedTo(int.class), false))
.expectStaticArgument(StaticArgument.scalar("d", DataTypes.DOUBLE(), false))
.expectOutput(TypeStrategies.explicit(DataTypes.INT())),
// ---
// test overloaded arguments extraction
TestSpec.forScalarFunction(OverloadedFunction.class)
.expectOutputMapping(
InputTypeStrategies.sequence(
new String[] {"i", "d"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(
DataTypes.INT().notNull().bridgedTo(int.class)),
InputTypeStrategies.explicit(DataTypes.DOUBLE())
}),
TypeStrategies.explicit(DataTypes.INT()))
.expectOutputMapping(
InputTypeStrategies.sequence(
new String[] {"s"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(DataTypes.STRING())
}),
TypeStrategies.explicit(
DataTypes.BIGINT().notNull().bridgedTo(long.class))),
// ---
// test overloaded arguments extraction async
TestSpec.forAsyncScalarFunction(OverloadedFunctionAsync.class)
.expectOutputMapping(
InputTypeStrategies.sequence(
new String[] {"i", "d"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(
DataTypes.INT().notNull().bridgedTo(int.class)),
InputTypeStrategies.explicit(DataTypes.DOUBLE())
}),
TypeStrategies.explicit(DataTypes.INT()))
.expectOutputMapping(
InputTypeStrategies.sequence(
new String[] {"s"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(DataTypes.STRING())
}),
TypeStrategies.explicit(DataTypes.BIGINT())),
// ---
// test overloaded arguments extraction async
TestSpec.forAsyncTableFunction(OverloadedFunctionAsyncTable.class)
.expectOutputMapping(
InputTypeStrategies.sequence(
new String[] {"i", "d"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(
DataTypes.INT().notNull().bridgedTo(int.class)),
InputTypeStrategies.explicit(DataTypes.DOUBLE())
}),
TypeStrategies.explicit(DataTypes.INT()))
.expectOutputMapping(
InputTypeStrategies.sequence(
new String[] {"s"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(DataTypes.STRING())
}),
TypeStrategies.explicit(DataTypes.INT())),
// ---
// test varying arguments extraction
TestSpec.forScalarFunction(VarArgFunction.class)
.expectOutputMapping(
InputTypeStrategies.varyingSequence(
new String[] {"i", "more"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(
DataTypes.INT().notNull().bridgedTo(int.class)),
InputTypeStrategies.explicit(
DataTypes.INT().notNull().bridgedTo(int.class))
}),
TypeStrategies.explicit(DataTypes.STRING())),
// ---
// test varying arguments extraction async
TestSpec.forAsyncScalarFunction(VarArgFunctionAsync.class)
.expectOutputMapping(
InputTypeStrategies.varyingSequence(
new String[] {"i", "more"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(
DataTypes.INT().notNull().bridgedTo(int.class)),
InputTypeStrategies.explicit(
DataTypes.INT().notNull().bridgedTo(int.class))
}),
TypeStrategies.explicit(DataTypes.STRING())),
// ---
// test varying arguments extraction async table
TestSpec.forAsyncTableFunction(VarArgFunctionAsyncTable.class)
.expectOutputMapping(
InputTypeStrategies.varyingSequence(
new String[] {"i", "more"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(
DataTypes.INT().notNull().bridgedTo(int.class)),
InputTypeStrategies.explicit(
DataTypes.INT().notNull().bridgedTo(int.class))
}),
TypeStrategies.explicit(DataTypes.STRING())),
// ---
// test varying arguments extraction with byte
TestSpec.forScalarFunction(VarArgWithByteFunction.class)
.expectOutputMapping(
InputTypeStrategies.varyingSequence(
new String[] {"bytes"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(
DataTypes.TINYINT()
.notNull()
.bridgedTo(byte.class))
}),
TypeStrategies.explicit(DataTypes.STRING())),
// ---
// test varying arguments extraction with byte async
TestSpec.forAsyncScalarFunction(VarArgWithByteFunctionAsync.class)
.expectOutputMapping(
InputTypeStrategies.varyingSequence(
new String[] {"bytes"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(
DataTypes.TINYINT()
.notNull()
.bridgedTo(byte.class))
}),
TypeStrategies.explicit(DataTypes.STRING())),
// ---
// test varying arguments extraction with byte async
TestSpec.forAsyncTableFunction(VarArgWithByteFunctionAsyncTable.class)
.expectOutputMapping(
InputTypeStrategies.varyingSequence(
new String[] {"bytes"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(
DataTypes.TINYINT()
.notNull()
.bridgedTo(byte.class))
}),
TypeStrategies.explicit(DataTypes.STRING())),
// ---
// output hint with input extraction
TestSpec.forScalarFunction(ExtractWithOutputHintFunction.class)
.expectStaticArgument(StaticArgument.scalar("i", DataTypes.INT(), false))
.expectOutput(TypeStrategies.explicit(DataTypes.INT())),
// ---
// output hint with input extraction
TestSpec.forAsyncScalarFunction(ExtractWithOutputHintFunctionAsync.class)
.expectStaticArgument(StaticArgument.scalar("i", DataTypes.INT(), false))
.expectOutput(TypeStrategies.explicit(DataTypes.INT())),
// ---
// output hint with input extraction
TestSpec.forAsyncTableFunction(ExtractWithOutputHintFunctionAsyncTable.class)
.expectStaticArgument(StaticArgument.scalar("i", DataTypes.INT(), false))
.expectOutput(TypeStrategies.explicit(DataTypes.INT())),
// ---
// output extraction with input hints
TestSpec.forScalarFunction(ExtractWithInputHintFunction.class)
.expectStaticArgument(StaticArgument.scalar("i", DataTypes.INT(), false))
.expectStaticArgument(
StaticArgument.scalar("b", DataTypes.BOOLEAN(), false))
.expectOutput(
TypeStrategies.explicit(
DataTypes.DOUBLE().notNull().bridgedTo(double.class))),
// ---
// non-scalar args
TestSpec.forScalarFunction(TableArgScalarFunction.class)
.expectErrorMessage(
"Only scalar arguments are supported at this location. "
+ "But argument 't' declared the following traits: [ROW_SEMANTIC_TABLE]"),
// ---
// different accumulator depending on input
TestSpec.forAggregateFunction(InputDependentAccumulatorFunction.class)
.expectAccumulator(
TypeStrategies.mapping(
Map.of(
InputTypeStrategies.sequence(
List.of("arg0"),
List.of(
InputTypeStrategies.explicit(
DataTypes.BIGINT()))),
TypeStrategies.explicit(
DataTypes.ROW(
DataTypes.FIELD(
"f", DataTypes.BIGINT()))),
InputTypeStrategies.sequence(
List.of("arg0"),
List.of(
InputTypeStrategies.explicit(
DataTypes.STRING()))),
TypeStrategies.explicit(
DataTypes.ROW(
DataTypes.FIELD(
"f",
DataTypes.STRING()))))))
.expectOutputMapping(
InputTypeStrategies.sequence(
List.of("arg0"),
List.of(InputTypeStrategies.explicit(DataTypes.BIGINT()))),
TypeStrategies.explicit(DataTypes.STRING()))
.expectOutputMapping(
InputTypeStrategies.sequence(
List.of("arg0"),
List.of(InputTypeStrategies.explicit(DataTypes.STRING()))),
TypeStrategies.explicit(DataTypes.STRING())),
// ---
// input, accumulator, and output are spread across the function
TestSpec.forAggregateFunction(AggregateFunctionWithManyAnnotations.class)
.expectStaticArgument(
StaticArgument.scalar(
"r",
DataTypes.ROW(
DataTypes.FIELD("i", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.BOOLEAN())),
false))
.expectAccumulator(
TypeStrategies.explicit(
DataTypes.ROW(DataTypes.FIELD("b", DataTypes.BOOLEAN()))))
.expectOutput(TypeStrategies.explicit(DataTypes.STRING())),
// ---
// accumulator with state hint
TestSpec.forAggregateFunction(StateHintAggregateFunction.class)
.expectStaticArgument(StaticArgument.scalar("i", DataTypes.INT(), false))
.expectState("myAcc", TypeStrategies.explicit(MyState.TYPE))
.expectOutput(TypeStrategies.explicit(DataTypes.INT())),
// ---
// accumulator with state hint in function hint
TestSpec.forAggregateFunction(StateHintInFunctionHintAggregateFunction.class)
.expectStaticArgument(StaticArgument.scalar("i", DataTypes.INT(), false))
.expectState("myAcc", TypeStrategies.explicit(MyState.TYPE))
.expectOutput(TypeStrategies.explicit(DataTypes.INT())),
// ---
// test for table functions
TestSpec.forTableFunction(OutputHintTableFunction.class)
.expectStaticArgument(
StaticArgument.scalar(
"i", DataTypes.INT().notNull().bridgedTo(int.class), false))
.expectOutput(
TypeStrategies.explicit(
DataTypes.ROW(
DataTypes.FIELD("i", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.BOOLEAN())))),
// ---
// mismatch between hints and implementation regarding return type
TestSpec.forScalarFunction(InvalidMethodScalarFunction.class)
.expectErrorMessage(
"Considering all hints, the method should comply with the signature:\n"
+ "java.lang.String eval(int[])"),
// ---
// mismatch between hints and implementation regarding return type
TestSpec.forAsyncScalarFunction(InvalidMethodScalarFunctionAsync.class)
.expectErrorMessage(
"Considering all hints, the method should comply with the signature:\n"
+ "eval(java.util.concurrent.CompletableFuture, int[])"),
// ---
// mismatch between hints and implementation regarding return type
TestSpec.forAsyncTableFunction(InvalidMethodTableFunctionAsync.class)
.expectErrorMessage(
"Considering all hints, the method should comply with the signature:\n"
+ "eval(java.util.concurrent.CompletableFuture, int[])"),
// ---
TestSpec.forAsyncTableFunction(InvalidMethodTableFunctionMissingCollection.class)
.expectErrorMessage(
"The method 'eval' expects nested generic type CompletableFuture<Collection> for the 0 arg."),
// ---
TestSpec.forAsyncTableFunction(InvalidMethodTableFunctionWrongGeneric.class)
.expectErrorMessage(
"The method 'eval' expects nested generic type CompletableFuture<Collection> for the 0 arg."),
// ---
TestSpec.forAsyncTableFunction(ConflictingReturnTypesAsyncTable.class)
.expectErrorMessage(
"Considering all hints, the method should comply with the signature:\n"
+ "eval(java.util.concurrent.CompletableFuture, int)"),
// ---
// mismatch between hints and implementation regarding accumulator
TestSpec.forAggregateFunction(InvalidMethodAggregateFunction.class)
.expectErrorMessage(
"Considering all hints, the method should comply with the signature:\n"
+ "accumulate(java.lang.Integer, int, boolean)\n"
+ "Pattern: (<accumulator> [, <argument>]*)"),
// ---
// no implementation
TestSpec.forTableFunction(MissingMethodTableFunction.class)
.expectErrorMessage(
"Could not find a publicly accessible method named 'eval'."),
// ---
// named arguments with overloaded function
// expected no named argument for overloaded function
TestSpec.forScalarFunction(NamedArgumentsScalarFunction.class),
// ---
// scalar function that takes any input
TestSpec.forScalarFunction(InputGroupScalarFunction.class)
.expectOutputMapping(
InputTypeStrategies.sequence(
new String[] {"o"},
new ArgumentTypeStrategy[] {InputTypeStrategies.ANY}),
TypeStrategies.explicit(DataTypes.STRING())),
// ---
// scalar function that takes any input as vararg
TestSpec.forScalarFunction(VarArgInputGroupScalarFunction.class)
.expectOutputMapping(
InputTypeStrategies.varyingSequence(
new String[] {"o"},
new ArgumentTypeStrategy[] {InputTypeStrategies.ANY}),
TypeStrategies.explicit(DataTypes.STRING())),
// ---
TestSpec.forScalarFunction(
"Scalar function with implicit overloading order",
OrderedScalarFunction.class)
.expectOutputMapping(
InputTypeStrategies.sequence(
new String[] {"i"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(DataTypes.INT())
}),
TypeStrategies.explicit(DataTypes.INT()))
.expectOutputMapping(
InputTypeStrategies.sequence(
new String[] {"l"},
new ArgumentTypeStrategy[] {
InputTypeStrategies.explicit(DataTypes.BIGINT())
}),
TypeStrategies.explicit(DataTypes.BIGINT())),
// ---
TestSpec.forScalarFunction(
"Scalar function with explicit overloading order by | TypeInferenceExtractorTest |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/server/DefaultPathContainer.java | {
"start": 1322,
"end": 5975
} | class ____ implements PathContainer {
private static final PathContainer EMPTY_PATH = new DefaultPathContainer("", Collections.emptyList());
private static final Map<Character, DefaultSeparator> SEPARATORS = new HashMap<>(2);
static {
SEPARATORS.put('/', new DefaultSeparator('/', "%2F"));
SEPARATORS.put('.', new DefaultSeparator('.', "%2E"));
}
private final String path;
private final List<Element> elements;
private DefaultPathContainer(String path, List<Element> elements) {
this.path = path;
this.elements = Collections.unmodifiableList(elements);
}
@Override
public String value() {
return this.path;
}
@Override
public List<Element> elements() {
return this.elements;
}
@Override
public boolean equals(@Nullable Object other) {
return (this == other) || (other instanceof PathContainer that && value().equals(that.value()));
}
@Override
public int hashCode() {
return this.path.hashCode();
}
@Override
public String toString() {
return value();
}
static PathContainer createFromUrlPath(String path, Options options) {
if (path.isEmpty()) {
return EMPTY_PATH;
}
char separator = options.separator();
DefaultSeparator separatorElement = SEPARATORS.get(separator);
if (separatorElement == null) {
throw new IllegalArgumentException("Unexpected separator: '" + separator + "'");
}
List<Element> elements = new ArrayList<>();
int begin;
if (path.charAt(0) == separator) {
begin = 1;
elements.add(separatorElement);
}
else {
begin = 0;
}
while (begin < path.length()) {
int end = path.indexOf(separator, begin);
String segment = (end != -1 ? path.substring(begin, end) : path.substring(begin));
if (!segment.isEmpty()) {
elements.add(options.shouldDecodeAndParseSegments() ?
decodeAndParsePathSegment(segment) :
DefaultPathSegment.from(segment, separatorElement));
}
if (end == -1) {
break;
}
elements.add(separatorElement);
begin = end + 1;
}
return new DefaultPathContainer(path, elements);
}
private static PathSegment decodeAndParsePathSegment(String segment) {
Charset charset = StandardCharsets.UTF_8;
int index = segment.indexOf(';');
if (index == -1) {
String valueToMatch = StringUtils.uriDecode(segment, charset);
return DefaultPathSegment.from(segment, valueToMatch);
}
else {
String valueToMatch = StringUtils.uriDecode(segment.substring(0, index), charset);
String pathParameterContent = segment.substring(index);
MultiValueMap<String, String> parameters = parsePathParams(pathParameterContent, charset);
return DefaultPathSegment.from(segment, valueToMatch, parameters);
}
}
private static MultiValueMap<String, String> parsePathParams(String input, Charset charset) {
MultiValueMap<String, String> result = new LinkedMultiValueMap<>();
int begin = 1;
while (begin < input.length()) {
int end = input.indexOf(';', begin);
String param = (end != -1 ? input.substring(begin, end) : input.substring(begin));
parsePathParamValues(param, charset, result);
if (end == -1) {
break;
}
begin = end + 1;
}
return result;
}
/**
 * Parse a single path parameter entry ({@code "name=v1,v2"} or a bare
 * {@code "name"}) and add the decoded name/value pairs to the output map.
 * <p>For {@code "name=value"}, the value is split on commas and each element is
 * URI-decoded; a bare name is registered with an empty-string value. Entries
 * whose decoded name is blank are ignored.
 * <p>Bug fix: in the bare-name branch the URI-decoded {@code name} was computed
 * and validated, but the raw, still-encoded {@code input} was used as the map
 * key ({@code output.add(input, "")}), inconsistent with the {@code "name=value"}
 * branch which keys on the decoded name. The decoded name is now used.
 * @param input one path parameter entry, without the surrounding {@code ';'}
 * @param charset the charset used to URI-decode the name and values
 * @param output the map to add decoded name/value pairs to
 */
private static void parsePathParamValues(String input, Charset charset, MultiValueMap<String, String> output) {
    if (StringUtils.hasText(input)) {
        int index = input.indexOf('=');
        if (index != -1) {
            String name = input.substring(0, index);
            name = StringUtils.uriDecode(name, charset);
            if (StringUtils.hasText(name)) {
                String value = input.substring(index + 1);
                // A comma-separated value list yields one map entry per element.
                for (String v : StringUtils.commaDelimitedListToStringArray(value)) {
                    output.add(name, StringUtils.uriDecode(v, charset));
                }
            }
        }
        else {
            String name = StringUtils.uriDecode(input, charset);
            if (StringUtils.hasText(name)) {
                // Use the decoded name as the key (was: the raw encoded input).
                output.add(name, "");
            }
        }
    }
}
/**
 * Return a sub-path of the given container over the element range
 * {@code [fromIndex, toIndex)}.
 * <p>The full range returns the container itself; an empty range returns
 * {@code EMPTY_PATH}. Otherwise the indexes are validated and a new container
 * is built over a sublist view of the elements, with its path text rebuilt by
 * concatenating the element values.
 * @param container the container to take a sub-path of
 * @param fromIndex the start element index (inclusive)
 * @param toIndex the end element index (exclusive)
 * @return the sub-path container
 */
static PathContainer subPath(PathContainer container, int fromIndex, int toIndex) {
    List<Element> elements = container.elements();
    if (fromIndex == 0 && toIndex == elements.size()) {
        // Full range: no need to build a new container.
        return container;
    }
    if (fromIndex == toIndex) {
        return EMPTY_PATH;
    }
    Assert.isTrue(fromIndex >= 0 && fromIndex < elements.size(), () -> "Invalid fromIndex: " + fromIndex);
    Assert.isTrue(toIndex >= 0 && toIndex <= elements.size(), () -> "Invalid toIndex: " + toIndex);
    Assert.isTrue(fromIndex < toIndex, () -> "fromIndex: " + fromIndex + " should be < toIndex " + toIndex);
    List<Element> subList = elements.subList(fromIndex, toIndex);
    StringBuilder path = new StringBuilder();
    for (Element element : subList) {
        path.append(element.value());
    }
    return new DefaultPathContainer(path.toString(), subList);
}
private static | DefaultPathContainer |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/xml/Supermarket.java | {
"start": 142,
"end": 310
} | class ____ extends BaseShop {
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
| Supermarket |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/idgen/userdefined/UserDefinedGeneratorsTests.java | {
"start": 2138,
"end": 5212
} | class ____ {
@Test
public void testCreateGeneratorsByBeanContainer() {
final BeanContainer beanContainer = Mockito.mock( BeanContainer.class );
given(beanContainer.getBean( any(), any(), any() ) ).willAnswer( invocation -> {
Class<?> beanType = (Class<?>) invocation.getArguments()[0];
LifecycleOptions options = (LifecycleOptions) invocation.getArguments()[1];
if (beanType == TestIdentifierGenerator.class) {
assertThat( options.canUseCachedReferences(), is( false ) );
assertThat( options.useJpaCompliantCreation(), is( true ) );
return new ContainedBean<TestIdentifierGenerator>() {
@Override
public TestIdentifierGenerator getBeanInstance() {
return new TestIdentifierGenerator();
}
@Override
public Class<TestIdentifierGenerator> getBeanClass() {
return TestIdentifierGenerator.class;
}
};
}
else {
return new ContainedBean<>() {
@Override
public Object getBeanInstance() {
return ( ( BeanInstanceProducer ) invocation.getArguments()[2] ).produceBeanInstance( beanType );
}
@Override
public Class getBeanClass() {
return beanType;
}
};
}
} );
final StandardServiceRegistryBuilder ssrb = ServiceRegistryUtil.serviceRegistryBuilder();
ssrb.applySetting( AvailableSettings.BEAN_CONTAINER, beanContainer )
.applySetting( AvailableSettings.ALLOW_EXTENSIONS_IN_CDI, "true" );
try (final StandardServiceRegistry ssr = ssrb.build()) {
final Metadata metadata = new MetadataSources( ssr )
.addAnnotatedClass( Entity1.class )
.addAnnotatedClass( Entity2.class )
.buildMetadata();
final PersistentClass entityBinding1 = metadata.getEntityBinding( Entity1.class.getName() );
final PersistentClass entityBinding2 = metadata.getEntityBinding( Entity2.class.getName() );
KeyValue keyValue1 = entityBinding1.getRootClass()
.getIdentifier();
Dialect dialect1 = new H2Dialect();
final Generator generator3 = keyValue1.createGenerator(dialect1, entityBinding1.getRootClass());
final IdentifierGenerator generator1 = generator3 instanceof IdentifierGenerator ? (IdentifierGenerator) generator3 : null;
KeyValue keyValue = entityBinding2.getRootClass()
.getIdentifier();
Dialect dialect = new H2Dialect();
final Generator generator = keyValue.createGenerator( dialect, entityBinding2.getRootClass());
final IdentifierGenerator generator2 = generator instanceof IdentifierGenerator ? (IdentifierGenerator) generator : null;
then( beanContainer ).should( times( 2 ) ).getBean( same( TestIdentifierGenerator.class ),
any( LifecycleOptions.class ),
same( FallbackBeanInstanceProducer.INSTANCE )
);
assertThat( generator1, is( instanceOf( TestIdentifierGenerator.class ) ) );
assertThat( generator2, is( instanceOf( TestIdentifierGenerator.class ) ) );
assertThat( generator1 == generator2, is( false ) ); // should not be same instance
}
}
@Entity( name = "Entity1" )
@Table( name = "tbl_1" )
public static | UserDefinedGeneratorsTests |
java | apache__camel | components/camel-spring-parent/camel-spring-security/src/main/java/org/apache/camel/component/spring/security/config/BeanDefinitionParser.java | {
"start": 3750,
"end": 4833
} | class ____ a
* setter method '<code>setBingoHallFavourite(String)</code>', the name returned had better be
* '<code>bingoHallFavourite</code>' (with that exact casing).
*
* @param attributeName the attribute name taken straight from the XML element being parsed (never
* <code>null</code>)
* @return the extracted JavaBean property name (must never be <code>null</code>)
*/
protected String extractPropertyName(String attributeName) {
return Conventions.attributeNameToPropertyName(attributeName);
}
/**
* Hook method that derived classes can implement to inspect/change a bean definition after parsing is complete.
* <p>
* The default implementation does nothing.
*
* @param beanDefinition the parsed (and probably totally defined) bean definition being built
* @param element the XML element that was the source of the bean definition's metadata
*/
protected void postProcess(BeanDefinitionBuilder beanDefinition, Element element) {
}
}
| with |
java | google__dagger | javatests/dagger/internal/codegen/DuplicateBindingsValidationTest.java | {
"start": 27080,
"end": 27202
} | interface ____ {",
" Object conflict();",
"",
" @Subcomponent.Builder",
" | B |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/proxy/LazyGroupWithInheritanceTest.java | {
"start": 2085,
"end": 10422
} | class ____ {
@Test
public void loadEntityWithAssociationToAbstract(SessionFactoryScope scope) {
final Statistics stats = scope.getSessionFactory().getStatistics();
stats.clear();
scope.inTransaction(
(session) -> {
final Order loaded = session.byId( Order.class ).load( 1 );
assert Hibernate.isPropertyInitialized( loaded, "customer" );
assertThat( stats.getPrepareStatementCount(), is( 1L ) );
assertThat( loaded, instanceOf( PersistentAttributeInterceptable.class ) );
final PersistentAttributeInterceptor interceptor = ((PersistentAttributeInterceptable) loaded).$$_hibernate_getInterceptor();
assertThat( interceptor, instanceOf( BytecodeLazyAttributeInterceptor.class ) );
final BytecodeLazyAttributeInterceptor interceptor1 = (BytecodeLazyAttributeInterceptor) interceptor;
}
);
}
@Test
public void queryEntityWithAssociationToAbstract(SessionFactoryScope scope) {
final Statistics stats = scope.getSessionFactory().getStatistics();
stats.clear();
final AtomicInteger expectedQueryCount = new AtomicInteger( 0 );
scope.inTransaction(
session -> {
final List<Order> orders = session.createQuery( "select o from Order o", Order.class ).list();
// todo (HHH-11147) : this is a regression from 4.x
// - the condition is that the association from Order to Customer points to the non-root
// entity (Customer) rather than one of its concrete subtypes (DomesticCustomer,
// ForeignCustomer). We'd have to read the "other table" to be able to resolve the
// concrete type. The same holds true for associations to versioned entities as well.
// The only viable solution I see would be to join to the "other side" and read the
// version/discriminator[1]. But of course that means doing the join which is generally
// what the application is trying to avoid in the first place
expectedQueryCount.set( 1 );
//expectedQueryCount.set( 4 );
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
for ( Order order : orders ) {
System.out.println( "############################################" );
System.out.println( "Starting Order #" + order.getOid() );
// accessing the many-to-one's id should not trigger a load
if ( order.getCustomer().getOid() == null ) {
System.out.println( "Got Order#customer: " + order.getCustomer().getOid() );
}
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
// accessing the one-to-many should trigger a load
final Set<Payment> orderPayments = order.getPayments();
System.out.println( "Number of payments = " + orderPayments.size() );
expectedQueryCount.getAndIncrement();
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
// access the non-inverse, logical 1-1
order.getSupplemental();
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
if ( order.getSupplemental() != null ) {
System.out.println( "Got Order#supplemental = " + order.getSupplemental().getOid() );
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
}
// access the inverse, logical 1-1
order.getSupplemental2();
expectedQueryCount.getAndIncrement();
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
if ( order.getSupplemental2() != null ) {
System.out.println( "Got Order#supplemental2 = " + order.getSupplemental2().getOid() );
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
}
}
}
);
}
/**
* Same test as {@link #queryEntityWithAssociationToAbstract(SessionFactoryScope)}, but using runtime
* fetching to issues just a single select
*/
@Test
public void queryEntityWithAssociationToAbstractRuntimeFetch(SessionFactoryScope scope) {
final Statistics stats = scope.getSessionFactory().getStatistics();
stats.clear();
final AtomicInteger expectedQueryCount = new AtomicInteger( 0 );
scope.inTransaction(
session -> {
final String qry = "select o from Order o join fetch o.customer c join fetch o.payments join fetch o.supplemental join fetch o.supplemental2";
final List<Order> orders = session.createQuery( qry, Order.class ).list();
// oh look - just a single query for all the data we will need. hmm, crazy
expectedQueryCount.set( 1 );
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
for ( Order order : orders ) {
System.out.println( "############################################" );
System.out.println( "Starting Order #" + order.getOid() );
// accessing the many-to-one's id should not trigger a load
if ( order.getCustomer().getOid() == null ) {
System.out.println( "Got Order#customer: " + order.getCustomer().getOid() );
}
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
// accessing the one-to-many should trigger a load
final Set<Payment> orderPayments = order.getPayments();
System.out.println( "Number of payments = " + orderPayments.size() );
// loaded already
// expectedQueryCount.getAndIncrement();
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
// access the non-inverse, logical 1-1
order.getSupplemental();
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
if ( order.getSupplemental() != null ) {
System.out.println( "Got Order#supplemental = " + order.getSupplemental().getOid() );
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
}
// access the inverse, logical 1-1
order.getSupplemental2();
// loaded already
// expectedQueryCount.getAndIncrement();
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
if ( order.getSupplemental2() != null ) {
System.out.println( "Got Order#supplemental2 = " + order.getSupplemental2().getOid() );
assertEquals( expectedQueryCount.get(), stats.getPrepareStatementCount() );
}
}
}
);
}
@BeforeEach
public void prepareTestData(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
final Address austin = new Address( 1, "Austin" );
final Address london = new Address( 2, "London" );
session.persist( austin );
session.persist( london );
final ForeignCustomer acme = new ForeignCustomer( 1, "Acme", london, "1234" );
final ForeignCustomer acmeBrick = new ForeignCustomer( 2, "Acme Brick", london, "9876", acme );
final ForeignCustomer freeBirds = new ForeignCustomer( 3, "Free Birds", austin, "13579" );
session.persist( acme );
session.persist( acmeBrick );
session.persist( freeBirds );
final Order order1 = new Order( 1, "some text", freeBirds );
freeBirds.getOrders().add( order1 );
session.persist( order1 );
final OrderSupplemental orderSupplemental = new OrderSupplemental( 1, 1 );
order1.setSupplemental( orderSupplemental );
final OrderSupplemental2 orderSupplemental2_1 = new OrderSupplemental2( 2, 2 );
order1.setSupplemental2( orderSupplemental2_1 );
orderSupplemental2_1.setOrder( order1 );
session.persist( orderSupplemental );
session.persist( orderSupplemental2_1 );
final Order order2 = new Order( 2, "some text", acme );
acme.getOrders().add( order2 );
session.persist( order2 );
final OrderSupplemental2 orderSupplemental2_2 = new OrderSupplemental2( 3, 3 );
order2.setSupplemental2( orderSupplemental2_2 );
orderSupplemental2_2.setOrder( order2 );
session.persist( orderSupplemental2_2 );
final CreditCardPayment payment1 = new CreditCardPayment( 1, 1F, "1" );
session.persist( payment1 );
order1.getPayments().add( payment1 );
final DebitCardPayment payment2 = new DebitCardPayment( 2, 2F, "2" );
session.persist( payment2 );
order1.getPayments().add( payment2 );
}
);
}
@AfterEach
public void cleanUpTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
}
| LazyGroupWithInheritanceTest |
java | spring-projects__spring-security | oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/jackson/OAuth2UserAuthorityMixin.java | {
"start": 1021,
"end": 1447
} | class ____ used to serialize/deserialize {@link OAuth2UserAuthority}.
*
* @author Sebastien Deleuze
* @author Joe Grandja
* @since 7.0
* @see OAuth2UserAuthority
* @see OAuth2ClientJacksonModule
*/
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS)
@JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.ANY, getterVisibility = JsonAutoDetect.Visibility.NONE,
isGetterVisibility = JsonAutoDetect.Visibility.NONE)
abstract | is |
java | elastic__elasticsearch | plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextPlugin.java | {
"start": 836,
"end": 1291
} | class ____ extends Plugin implements MapperPlugin, SearchPlugin {
@Override
public Map<String, Mapper.TypeParser> getMappers() {
return Collections.singletonMap(AnnotatedTextFieldMapper.CONTENT_TYPE, AnnotatedTextFieldMapper.PARSER);
}
@Override
public Map<String, Highlighter> getHighlighters() {
return Collections.singletonMap(AnnotatedTextHighlighter.NAME, new AnnotatedTextHighlighter());
}
}
| AnnotatedTextPlugin |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java | {
"start": 16355,
"end": 19164
} | class ____ extends AbstractBuilder {
NegatedSyntheticSourceDelegateBuilder(QueryBuilder next, String field, Source source) {
super(next, field, source);
}
@Override
public String getWriteableName() {
throw new UnsupportedOperationException("Not serialized");
}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("negated_" + ENTRY.name);
builder.field("field", field() + ":synthetic_source_delegate");
builder.field("next", next(), params);
builder.field("source", source().toString());
builder.endObject();
}
@Override
public TransportVersion getMinimalSupportedVersion() {
throw new UnsupportedOperationException("Not serialized");
}
@Override
protected final org.apache.lucene.search.Query doToQuery(SearchExecutionContext context) throws IOException {
MappedFieldType ft = context.getFieldType(field());
if (ft == null) {
return new MatchNoDocsQuery("missing field [" + field() + "]");
}
ft = ((TextFieldMapper.TextFieldType) ft).syntheticSourceDelegate().orElse(null);
org.apache.lucene.search.Query svNext = simple(ft, context);
org.apache.lucene.search.Query ignored = new TermQuery(new org.apache.lucene.index.Term(IgnoredFieldMapper.NAME, ft.name()));
ignored = ignored.rewrite(context.searcher());
if (ignored instanceof MatchNoDocsQuery) {
return svNext;
}
BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.add(svNext, BooleanClause.Occur.SHOULD);
builder.add(ignored, BooleanClause.Occur.SHOULD);
return builder.build();
}
@Override
protected AbstractBuilder rewrite(QueryBuilder next) {
return new Builder(next, field(), source());
}
}
/**
* Write a {@link Source} including the text in it.
*/
static void writeOldSource(StreamOutput out, Source source) throws IOException {
out.writeInt(source.source().getLineNumber());
out.writeInt(source.source().getColumnNumber());
out.writeString(source.text());
}
/**
* Read a {@link Source} including the text in it.
*/
static Source readOldSource(StreamInput in) throws IOException {
int line = in.readInt();
int column = in.readInt();
int charPositionInLine = column - 1;
String text = in.readString();
return new Source(new Location(line, charPositionInLine), text);
}
public | NegatedSyntheticSourceDelegateBuilder |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java | {
"start": 2729,
"end": 39664
} | class ____ extends AbstractSnapshotIntegTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order
.put(LARGE_SNAPSHOT_POOL_SETTINGS) // we have #testGetSnapshotsWithSnapshotInProgress which needs many threads to ensure
// its snapshot pool does not become fully blocked on data nodes when blocking on data files
.build();
}
public void testStatusApiConsistency() throws Exception {
createRepository("test-repo", "fs");
createIndex("test-idx-1", "test-idx-2", "test-idx-3");
ensureGreen();
logger.info("--> indexing some data");
for (int i = 0; i < 100; i++) {
indexDoc("test-idx-1", Integer.toString(i), "foo", "bar" + i);
indexDoc("test-idx-2", Integer.toString(i), "foo", "baz" + i);
indexDoc("test-idx-3", Integer.toString(i), "foo", "baz" + i);
}
refresh();
createFullSnapshot("test-repo", "test-snap");
List<SnapshotInfo> snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").get().getSnapshots();
assertThat(snapshotInfos.size(), equalTo(1));
SnapshotInfo snapshotInfo = snapshotInfos.get(0);
assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS));
assertThat(snapshotInfo.version(), equalTo(IndexVersion.current()));
final List<SnapshotStatus> snapshotStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo")
.setSnapshots("test-snap")
.get()
.getSnapshots();
assertThat(snapshotStatus.size(), equalTo(1));
final SnapshotStatus snStatus = snapshotStatus.get(0);
assertEquals(snStatus.getStats().getStartTime(), snapshotInfo.startTime());
assertEquals(snStatus.getStats().getTime(), snapshotInfo.endTime() - snapshotInfo.startTime());
}
public void testStatusAPICallInProgressSnapshot() throws Exception {
createRepository("test-repo", "mock", Settings.builder().put("location", randomRepoPath()).put("block_on_data", true));
createIndex("test-idx-1");
ensureGreen();
logger.info("--> indexing some data");
for (int i = 0; i < 100; i++) {
indexDoc("test-idx-1", Integer.toString(i), "foo", "bar" + i);
}
refresh();
logger.info("--> snapshot");
ActionFuture<CreateSnapshotResponse> createSnapshotResponseActionFuture = startFullSnapshot("test-repo", "test-snap");
logger.info("--> wait for data nodes to get blocked");
waitForBlockOnAnyDataNode("test-repo");
awaitNumberOfSnapshotsInProgress(1);
assertEquals(
SnapshotsInProgress.State.STARTED,
clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo")
.setSnapshots("test-snap")
.get()
.getSnapshots()
.get(0)
.getState()
);
logger.info("--> unblock all data nodes");
unblockAllDataNodes("test-repo");
assertSuccessful(createSnapshotResponseActionFuture);
}
public void testExceptionOnMissingSnapBlob() throws IOException {
disableRepoConsistencyCheck("This test intentionally corrupts the repository");
final Path repoPath = randomRepoPath();
createRepository("test-repo", "fs", repoPath);
final SnapshotInfo snapshotInfo = createFullSnapshot("test-repo", "test-snap");
logger.info("--> delete snap-${uuid}.dat file for this snapshot to simulate concurrent delete");
IOUtils.rm(repoPath.resolve(Strings.format(SNAPSHOT_NAME_FORMAT, snapshotInfo.snapshotId().getUUID())));
expectThrows(
SnapshotMissingException.class,
clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").setSnapshots("test-snap")
);
}
public void testExceptionOnMissingShardLevelSnapBlob() throws IOException {
disableRepoConsistencyCheck("This test intentionally corrupts the repository");
final Path repoPath = randomRepoPath();
createRepository("test-repo", "fs", repoPath);
createIndex("test-idx-1");
ensureGreen();
logger.info("--> indexing some data");
for (int i = 0; i < 100; i++) {
indexDoc("test-idx-1", Integer.toString(i), "foo", "bar" + i);
}
refresh();
final SnapshotInfo snapshotInfo = createFullSnapshot("test-repo", "test-snap");
logger.info("--> delete shard-level snap-${uuid}.dat file for one shard in this snapshot to simulate concurrent delete");
final String indexRepoId = getRepositoryData("test-repo").resolveIndexId(snapshotInfo.indices().get(0)).getId();
IOUtils.rm(
repoPath.resolve("indices")
.resolve(indexRepoId)
.resolve("0")
.resolve(Strings.format(SNAPSHOT_NAME_FORMAT, snapshotInfo.snapshotId().getUUID()))
);
expectThrows(
SnapshotMissingException.class,
clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo").setSnapshots("test-snap")
);
}
public void testGetSnapshotsWithoutIndices() throws Exception {
createRepository("test-repo", "fs");
logger.info("--> snapshot");
final SnapshotInfo snapshotInfo = assertSuccessful(
clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap")
.setIndices()
.setWaitForCompletion(true)
.execute()
);
assertThat(snapshotInfo.totalShards(), is(0));
logger.info("--> verify that snapshot without index shows up in non-verbose listing");
final List<SnapshotInfo> snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo")
.setVerbose(false)
.get()
.getSnapshots();
assertThat(snapshotInfos, hasSize(1));
final SnapshotInfo found = snapshotInfos.get(0);
assertThat(found.snapshotId(), is(snapshotInfo.snapshotId()));
assertThat(found.state(), is(SnapshotState.SUCCESS));
assertThat(found.indexSnapshotDetails(), anEmptyMap());
}
/**
* Tests the following sequence of steps:
* 1. Start snapshot of two shards (both located on separate data nodes).
* 2. Have one of the shards snapshot completely and the other block
* 3. Restart the data node that completed its shard snapshot
* 4. Make sure that snapshot status APIs show correct file-counts and -sizes for non-restarted nodes
* 5. Make sure the description string is set for shard snapshots on restarted nodes.
*
* @throws Exception on failure
*/
public void testCorrectCountsForDoneShards() throws Exception {
final String indexOne = "index-1";
final String indexTwo = "index-2";
final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
final String dataNodeOne = dataNodes.get(0);
final String dataNodeTwo = dataNodes.get(1);
createIndex(indexOne, singleShardOneNode(dataNodeOne));
indexDoc(indexOne, "some_doc_id", "foo", "bar");
createIndex(indexTwo, singleShardOneNode(dataNodeTwo));
indexDoc(indexTwo, "some_doc_id", "foo", "bar");
final String repoName = "test-repo";
createRepository(repoName, "mock");
blockDataNode(repoName, dataNodeOne);
final String snapshotOne = "snap-1";
// restarting a data node below so using a master client here
final ActionFuture<CreateSnapshotResponse> responseSnapshotOne = internalCluster().masterClient()
.admin()
.cluster()
.prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotOne)
.setWaitForCompletion(true)
.execute();
assertBusy(() -> {
final SnapshotStatus snapshotStatusOne = getSnapshotStatus(repoName, snapshotOne);
final SnapshotIndexShardStatus snapshotShardState = stateFirstShard(snapshotStatusOne, indexTwo);
assertThat(snapshotShardState.getStage(), is(SnapshotIndexShardStage.DONE));
assertThat(snapshotShardState.getStats().getTotalFileCount(), greaterThan(0));
assertThat(snapshotShardState.getStats().getTotalSize(), greaterThan(0L));
assertNull("expected a null description for snapshot shard status: " + snapshotShardState, snapshotShardState.getDescription());
}, 30L, TimeUnit.SECONDS);
internalCluster().restartNode(dataNodeTwo);
final var snapshotStatusAfterRestart = getSnapshotStatus(repoName, snapshotOne);
final var snapshotShardStateIndexTwo = stateFirstShard(snapshotStatusAfterRestart, indexTwo);
assertThat(snapshotShardStateIndexTwo.getStage(), is(SnapshotIndexShardStage.DONE));
assertNotNull("expected a non-null description string for missing stats", snapshotShardStateIndexTwo.getDescription());
final var missingStats = snapshotShardStateIndexTwo.getStats();
assertThat(missingStats.getTotalFileCount(), equalTo(-1));
assertThat(missingStats.getTotalSize(), equalTo(-1L));
final var snapshotShardStateIndexOne = stateFirstShard(snapshotStatusAfterRestart, indexOne);
assertNull("expected a null description string for available stats", snapshotShardStateIndexOne.getDescription());
assertThat(snapshotShardStateIndexOne.getStats().getTotalFileCount(), greaterThan(0));
assertThat(snapshotShardStateIndexOne.getStats().getTotalSize(), greaterThan(0L));
unblockAllDataNodes(repoName);
assertThat(responseSnapshotOne.get().getSnapshotInfo().state(), is(SnapshotState.SUCCESS));
// indexing another document to the second index so it will do writes during the snapshot and we can block on those writes
indexDoc(indexTwo, "some_other_doc_id", "foo", "other_bar");
blockDataNode(repoName, dataNodeTwo);
final String snapshotTwo = "snap-2";
final ActionFuture<CreateSnapshotResponse> responseSnapshotTwo = clusterAdmin().prepareCreateSnapshot(
TEST_REQUEST_TIMEOUT,
repoName,
snapshotTwo
).setWaitForCompletion(true).execute();
waitForBlock(dataNodeTwo, repoName);
assertBusy(() -> {
final SnapshotStatus snapshotStatusOne = getSnapshotStatus(repoName, snapshotOne);
final SnapshotStatus snapshotStatusTwo = getSnapshotStatus(repoName, snapshotTwo);
final SnapshotIndexShardStatus snapshotShardStateOne = stateFirstShard(snapshotStatusOne, indexOne);
final SnapshotIndexShardStatus snapshotShardStateTwo = stateFirstShard(snapshotStatusTwo, indexOne);
assertThat(snapshotShardStateOne.getStage(), is(SnapshotIndexShardStage.DONE));
assertThat(snapshotShardStateTwo.getStage(), is(SnapshotIndexShardStage.DONE));
final int totalFilesShardOne = snapshotShardStateOne.getStats().getTotalFileCount();
final long totalSizeShardOne = snapshotShardStateOne.getStats().getTotalSize();
assertThat(totalFilesShardOne, greaterThan(0));
assertThat(totalSizeShardOne, greaterThan(0L));
assertThat(totalFilesShardOne, equalTo(snapshotShardStateTwo.getStats().getTotalFileCount()));
assertThat(totalSizeShardOne, equalTo(snapshotShardStateTwo.getStats().getTotalSize()));
assertThat(snapshotShardStateTwo.getStats().getIncrementalFileCount(), equalTo(0));
assertThat(snapshotShardStateTwo.getStats().getIncrementalSize(), equalTo(0L));
}, 30L, TimeUnit.SECONDS);
unblockAllDataNodes(repoName);
final SnapshotInfo snapshotInfo = responseSnapshotTwo.get().getSnapshotInfo();
assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS));
assertTrue(snapshotInfo.indexSnapshotDetails().toString(), snapshotInfo.indexSnapshotDetails().containsKey(indexOne));
final SnapshotInfo.IndexSnapshotDetails indexSnapshotDetails = snapshotInfo.indexSnapshotDetails().get(indexOne);
assertThat(indexSnapshotDetails.toString(), indexSnapshotDetails.getShardCount(), equalTo(1));
assertThat(indexSnapshotDetails.toString(), indexSnapshotDetails.getMaxSegmentsPerShard(), greaterThanOrEqualTo(1));
assertThat(indexSnapshotDetails.toString(), indexSnapshotDetails.getSize().getBytes(), greaterThan(0L));
}
public void testGetSnapshotsNoRepos() {
ensureGreen();
GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, new String[] { "_all" })
.setSnapshots(randomFrom("_all", "*"))
.get();
assertTrue(getSnapshotsResponse.getSnapshots().isEmpty());
}
public void testGetSnapshotsMultipleRepos() throws Exception {
final Client client = client();
List<String> snapshotList = new ArrayList<>();
List<String> repoList = new ArrayList<>();
Map<String, List<String>> repo2SnapshotNames = new HashMap<>();
logger.info("--> create an index and index some documents");
final String indexName = "test-idx";
createIndexWithRandomDocs(indexName, 10);
final int numberOfShards = IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(
client.admin().indices().prepareGetSettings(TEST_REQUEST_TIMEOUT, indexName).get().getIndexToSettings().get(indexName)
);
for (int repoIndex = 0; repoIndex < randomIntBetween(2, 5); repoIndex++) {
final String repoName = "repo" + repoIndex;
repoList.add(repoName);
final Path repoPath = randomRepoPath();
logger.info("--> create repository with name " + repoName);
assertAcked(
client.admin()
.cluster()
.preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)
.setType("fs")
.setSettings(Settings.builder().put("location", repoPath).build())
);
List<String> snapshotNames = new ArrayList<>();
repo2SnapshotNames.put(repoName, snapshotNames);
for (int snapshotIndex = 0; snapshotIndex < randomIntBetween(2, 5); snapshotIndex++) {
final String snapshotName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
snapshotList.add(snapshotName);
// Wait for at least 1ms to ensure that snapshots can be ordered by timestamp deterministically
for (final ThreadPool threadPool : internalCluster().getInstances(ThreadPool.class)) {
final long startMillis = threadPool.absoluteTimeInMillis();
assertBusy(() -> assertThat(threadPool.absoluteTimeInMillis(), greaterThan(startMillis)));
}
logger.info("--> create snapshot with index {} and name {} in repository {}", snapshotIndex, snapshotName, repoName);
CreateSnapshotResponse createSnapshotResponse = client.admin()
.cluster()
.prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName)
.setWaitForCompletion(true)
.setIndices(indexName)
.get();
final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo();
assertThat(snapshotInfo.successfulShards(), greaterThan(0));
assertTrue(snapshotInfo.indexSnapshotDetails().containsKey(indexName));
final SnapshotInfo.IndexSnapshotDetails indexSnapshotDetails = snapshotInfo.indexSnapshotDetails().get(indexName);
assertThat(indexSnapshotDetails.getShardCount(), equalTo(numberOfShards));
assertThat(indexSnapshotDetails.getMaxSegmentsPerShard(), greaterThanOrEqualTo(1));
assertThat(indexSnapshotDetails.getSize().getBytes(), greaterThan(0L));
snapshotNames.add(snapshotName);
}
}
logger.info("--> get and verify snapshots");
GetSnapshotsResponse getSnapshotsResponse = client.admin()
.cluster()
.prepareGetSnapshots(
TEST_REQUEST_TIMEOUT,
randomFrom(new String[] { "_all" }, new String[] { "repo*" }, repoList.toArray(new String[0]))
)
.setSnapshots(randomFrom("_all", "*"))
.get();
for (Map.Entry<String, List<String>> repo2Names : repo2SnapshotNames.entrySet()) {
String repo = repo2Names.getKey();
List<String> snapshotNames = repo2Names.getValue();
List<SnapshotInfo> snapshots = getSnapshotsResponse.getSnapshots();
assertEquals(
snapshotNames,
snapshots.stream().filter(s -> s.repository().equals(repo)).map(s -> s.snapshotId().getName()).toList()
);
}
logger.info("--> specify all snapshot names with ignoreUnavailable=false");
final var failingFuture = client.admin()
.cluster()
.prepareGetSnapshots(TEST_REQUEST_TIMEOUT, randomFrom("_all", "repo*"))
.setIgnoreUnavailable(false)
.setSnapshots(snapshotList.toArray(new String[0]))
.execute();
expectThrows(SnapshotMissingException.class, failingFuture::actionGet);
logger.info("--> specify all snapshot names with ignoreUnavailable=true");
GetSnapshotsResponse getSnapshotsResponse3 = client.admin()
.cluster()
.prepareGetSnapshots(TEST_REQUEST_TIMEOUT, randomFrom("_all", "repo*"))
.setIgnoreUnavailable(true)
.setSnapshots(snapshotList.toArray(new String[0]))
.get();
for (Map.Entry<String, List<String>> repo2Names : repo2SnapshotNames.entrySet()) {
String repo = repo2Names.getKey();
List<String> snapshotNames = repo2Names.getValue();
List<SnapshotInfo> snapshots = getSnapshotsResponse3.getSnapshots();
assertEquals(
snapshotNames,
snapshots.stream().filter(s -> s.repository().equals(repo)).map(s -> s.snapshotId().getName()).toList()
);
}
}
/**
 * Checks that an in-progress snapshot is visible through the get-snapshots API, that its shard
 * counters agree with the snapshot-status API, and that a non-existent snapshot name is either
 * silently skipped ({@code ignoreUnavailable=true}) or raises {@link SnapshotMissingException}
 * ({@code ignoreUnavailable=false}).
 */
public void testGetSnapshotsWithSnapshotInProgress() throws Exception {
    // "block_on_data" makes the mock repository stall data nodes while they write shard data,
    // keeping the snapshot IN_PROGRESS until the nodes are unblocked at the end of the test.
    createRepository("test-repo", "mock", Settings.builder().put("location", randomRepoPath()).put("block_on_data", true));
    String indexName = "test-idx-1";
    // 2..10 primary shards, no replicas, so shard progress is spread over the data nodes
    createIndexWithContent(indexName, indexSettingsNoReplicas(randomIntBetween(2, 10)).build());
    ensureGreen();
    ActionFuture<CreateSnapshotResponse> createSnapshotResponseActionFuture = startFullSnapshot("test-repo", "test-snap");
    logger.info("--> wait for data nodes to get blocked");
    waitForBlockOnAnyDataNode("test-repo");
    awaitNumberOfSnapshotsInProgress(1);
    logger.info("--> wait for snapshots to get to a consistent state");
    // Wait until exactly one shard is still INIT and every other shard is SUCCESS, so the
    // total/done shard counters compared below are stable while the snapshot is in progress.
    awaitClusterState(state -> {
        SnapshotsInProgress snapshotsInProgress = SnapshotsInProgress.get(state);
        Set<Snapshot> snapshots = snapshotsInProgress.asStream().map(SnapshotsInProgress.Entry::snapshot).collect(Collectors.toSet());
        if (snapshots.size() != 1) {
            return false;
        }
        var shards = snapshotsInProgress.snapshot(snapshots.iterator().next()).shards();
        long initShards = shards.values().stream().filter(v -> v.state() == SnapshotsInProgress.ShardState.INIT).count();
        long successShards = shards.entrySet()
            .stream()
            .filter(e -> e.getValue().state() == SnapshotsInProgress.ShardState.SUCCESS)
            .count();
        return successShards == shards.size() - 1 && initShards == 1;
    });
    GetSnapshotsResponse response1 = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo")
        .setSnapshots("test-snap")
        .setIgnoreUnavailable(true)
        .get();
    List<SnapshotInfo> snapshotInfoList = response1.getSnapshots();
    assertEquals(1, snapshotInfoList.size());
    SnapshotInfo snapshotInfo = snapshotInfoList.get(0);
    assertEquals(SnapshotState.IN_PROGRESS, snapshotInfo.state());
    // The get-snapshots response must agree with the snapshot-status API on shard progress.
    SnapshotStatus snapshotStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT).get().getSnapshots().get(0);
    assertThat(snapshotInfo.totalShards(), equalTo(snapshotStatus.getIndices().get(indexName).getShardsStats().getTotalShards()));
    assertThat(snapshotInfo.successfulShards(), equalTo(snapshotStatus.getIndices().get(indexName).getShardsStats().getDoneShards()));
    assertThat(snapshotInfo.shardFailures().size(), equalTo(0));
    // A missing snapshot name is skipped when ignoreUnavailable=true ...
    String notExistedSnapshotName = "snapshot_not_exist";
    GetSnapshotsResponse response2 = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo")
        .setSnapshots(notExistedSnapshotName)
        .setIgnoreUnavailable(true)
        .get();
    assertEquals(0, response2.getSnapshots().size());
    // ... and throws when ignoreUnavailable=false.
    expectThrows(
        SnapshotMissingException.class,
        clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo")
            .setSnapshots(notExistedSnapshotName)
            .setIgnoreUnavailable(false)
    );
    logger.info("--> unblock all data nodes");
    unblockAllDataNodes("test-repo");
    // Let the snapshot run to completion so the test leaves no running operations behind.
    assertSuccessful(createSnapshotResponseActionFuture);
}
/**
 * Verifies that the snapshot-status API reports {@code FAILED} for a failed snapshot that was
 * injected into the repository in the legacy (BwC) format before the repository was verified.
 */
public void testSnapshotStatusOnFailedSnapshot() throws Exception {
    String repoName = "test-repo";
    createRepositoryNoVerify(repoName, "fs"); // mustn't load the repository data before we inject the broken snapshot
    final String snapshot = "test-snap-1";
    // Write a failed snapshot in the legacy format straight into the repository contents.
    addBwCFailedSnapshot(repoName, snapshot, Collections.emptyMap());
    logger.info("--> creating good index");
    assertAcked(prepareCreate("test-idx-good").setSettings(indexSettingsNoReplicas(1)));
    ensureGreen();
    indexRandomDocs("test-idx-good", randomIntBetween(1, 5));
    // The status request must still resolve the broken snapshot and surface its FAILED state.
    final SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName)
        .setSnapshots(snapshot)
        .get();
    assertEquals(1, snapshotsStatusResponse.getSnapshots().size());
    assertEquals(SnapshotsInProgress.State.FAILED, snapshotsStatusResponse.getSnapshots().get(0).getState());
}
/**
 * Exercises the get-snapshots API against a repository in several states: an empty repository,
 * a single in-progress snapshot, and a mix of completed plus in-progress snapshots addressed
 * via {@code _all}, {@code _current}, explicit names and wildcard patterns. Also verifies that
 * overlapping name patterns do not produce duplicate entries in the response.
 */
public void testGetSnapshotsRequest() throws Exception {
    final String repositoryName = "test-repo";
    final String indexName = "test-idx";
    final Client client = client();
    createRepository(
        repositoryName,
        "mock",
        Settings.builder()
            .put("location", randomRepoPath())
            .put("compress", false)
            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
            .put("wait_after_unblock", 200)
    );
    logger.info("--> get snapshots on an empty repository");
    expectThrows(
        SnapshotMissingException.class,
        client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName).addSnapshots("non-existent-snapshot")
    );
    // with ignore unavailable set to true, should not throw an exception
    GetSnapshotsResponse getSnapshotsResponse = client.admin()
        .cluster()
        .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName)
        .setIgnoreUnavailable(true)
        .addSnapshots("non-existent-snapshot")
        .get();
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0));
    logger.info("--> creating an index and indexing documents");
    // Create index on 2 nodes and make sure each node has a primary by setting no replicas
    assertAcked(prepareCreate(indexName, 1, Settings.builder().put("number_of_replicas", 0)));
    ensureGreen();
    indexRandomDocs(indexName, 10);
    // make sure we return only the in-progress snapshot when taking the first snapshot on a clean repository
    // take initial snapshot with a block, making sure we only get 1 in-progress snapshot returned
    // block a node so the create snapshot operation can remain in progress
    final String initialBlockedNode = blockNodeWithIndex(repositoryName, indexName);
    client.admin()
        .cluster()
        .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, "snap-on-empty-repo")
        .setWaitForCompletion(false)
        .setIndices(indexName)
        .get();
    waitForBlock(initialBlockedNode, repositoryName); // wait for block to kick in
    getSnapshotsResponse = client.admin()
        .cluster()
        .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName)
        .setSnapshots(randomFrom("_all", "_current", "snap-on-*", "*-on-empty-repo", "snap-on-empty-repo"))
        .get();
    assertEquals(1, getSnapshotsResponse.getSnapshots().size());
    assertEquals("snap-on-empty-repo", getSnapshotsResponse.getSnapshots().get(0).snapshotId().getName());
    unblockNode(repositoryName, initialBlockedNode); // unblock node
    startDeleteSnapshot(repositoryName, "snap-on-empty-repo").get();
    // numSnapshots - 1 completed snapshots plus one in-progress snapshot created further below
    final int numSnapshots = randomIntBetween(1, 3) + 1;
    logger.info("--> take {} snapshot(s)", numSnapshots - 1);
    final String[] snapshotNames = new String[numSnapshots];
    for (int i = 0; i < numSnapshots - 1; i++) {
        final String snapshotName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
        CreateSnapshotResponse createSnapshotResponse = client.admin()
            .cluster()
            .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName)
            .setWaitForCompletion(true)
            .setIndices(indexName)
            .get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
        snapshotNames[i] = snapshotName;
    }
    logger.info("--> take another snapshot to be in-progress");
    // add documents so there are data files to block on
    for (int i = 10; i < 20; i++) {
        indexDoc(indexName, Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    final String inProgressSnapshot = randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
    snapshotNames[numSnapshots - 1] = inProgressSnapshot;
    // block a node so the create snapshot operation can remain in progress
    final String blockedNode = blockNodeWithIndex(repositoryName, indexName);
    client.admin()
        .cluster()
        .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, inProgressSnapshot)
        .setWaitForCompletion(false)
        .setIndices(indexName)
        .get();
    waitForBlock(blockedNode, repositoryName); // wait for block to kick in
    logger.info("--> get all snapshots with a current in-progress");
    // with ignore unavailable set to true, should not throw an exception
    final List<String> snapshotsToGet = new ArrayList<>();
    if (randomBoolean()) {
        // use _current plus the individual names of the finished snapshots
        snapshotsToGet.add("_current");
        for (int i = 0; i < numSnapshots - 1; i++) {
            snapshotsToGet.add(snapshotNames[i]);
        }
    } else {
        snapshotsToGet.add("_all");
    }
    getSnapshotsResponse = client.admin()
        .cluster()
        .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName)
        .setSnapshots(snapshotsToGet.toArray(Strings.EMPTY_ARRAY))
        .get();
    // Sorted copy of the expected names, computed once. Unlike the Arrays.asList(...) +
    // Collections.sort(...) idiom, this does not mutate the snapshotNames array through the
    // fixed-size list view backing it.
    final List<String> sortedNames = Arrays.stream(snapshotNames).sorted().toList();
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
    assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().toList(), equalTo(sortedNames));
    getSnapshotsResponse = client.admin()
        .cluster()
        .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName)
        .addSnapshots(snapshotNames)
        .get();
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
    assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().toList(), equalTo(sortedNames));
    logger.info("--> make sure duplicates are not returned in the response");
    // Request every snapshot by name plus two overlapping wildcard patterns derived from one
    // of the names; each snapshot must still appear exactly once in the response.
    String regexName = snapshotNames[randomIntBetween(0, numSnapshots - 1)];
    final int splitPos = regexName.length() / 2;
    final String firstRegex = regexName.substring(0, splitPos) + "*";
    final String secondRegex = "*" + regexName.substring(splitPos);
    getSnapshotsResponse = client.admin()
        .cluster()
        .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName)
        .addSnapshots(snapshotNames)
        .addSnapshots(firstRegex, secondRegex)
        .get();
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
    assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().toList(), equalTo(sortedNames));
    unblockNode(repositoryName, blockedNode); // unblock node
    awaitNoMoreRunningOperations();
}
/**
 * Fires many concurrent snapshot-status and get-snapshots requests while snapshots are being
 * created, saturating the SNAPSHOT_META threadpool, and verifies that every response is
 * complete and internally consistent (no FAILED/ABORTED states, matching per-shard counters).
 */
public void testConcurrentCreateAndStatusAPICalls() throws Exception {
    final var indexNames = IntStream.range(0, between(1, 10)).mapToObj(i -> "test-idx-" + i).toList();
    indexNames.forEach(this::createIndexWithContent);
    final String repoName = "test-repo";
    createRepository(repoName, "fs");
    if (randomBoolean()) {
        // sometimes cause some deduplication
        createSnapshot(repoName, "initial_snapshot", List.of());
        for (final var indexName : indexNames) {
            if (randomBoolean()) {
                indexDoc(indexName, "another_id", "baz", "quux");
            }
        }
    }
    final int snapshots = randomIntBetween(10, 20);
    final List<ActionFuture<SnapshotsStatusResponse>> statuses = new ArrayList<>(snapshots);
    final List<ActionFuture<GetSnapshotsResponse>> gets = new ArrayList<>(snapshots);
    final Client dataNodeClient = dataNodeClient();
    final var snapshotNames = IntStream.range(0, snapshots).mapToObj(i -> "test-snap-" + i).toArray(String[]::new);
    final var waitForCompletion = randomBoolean();
    // Completes once all create-snapshot requests have been acknowledged (or completed, when
    // waitForCompletion is true).
    final var createsListener = new PlainActionFuture<Void>();
    final var createsGroupedListener = new GroupedActionListener<CreateSnapshotResponse>(
        snapshotNames.length,
        createsListener.map(ignored -> null)
    );
    for (final var snapshotName : snapshotNames) {
        clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName)
            .setWaitForCompletion(waitForCompletion)
            .execute(createsGroupedListener);
    }
    createsListener.get(60, TimeUnit.SECONDS);
    // run enough parallel status requests to max out the SNAPSHOT_META threadpool
    final var metaThreadPoolSize = internalCluster().getCurrentMasterNodeInstance(ThreadPool.class)
        .info(ThreadPool.Names.SNAPSHOT_META)
        .getMax();
    for (int i = 0; i < metaThreadPoolSize * 2; i++) {
        statuses.add(
            dataNodeClient.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotNames).execute()
        );
        gets.add(
            dataNodeClient.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotNames).execute()
        );
    }
    // ... and then some more status requests until all snapshots are done
    var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class);
    assertBusy(() -> {
        // Keep issuing requests while snapshots are still running; assertBusy retries until
        // the in-progress set on the master is empty.
        final var stillRunning = SnapshotsInProgress.get(masterClusterService.state()).isEmpty() == false;
        statuses.add(
            dataNodeClient.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotNames).execute()
        );
        gets.add(
            dataNodeClient.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotNames).execute()
        );
        assertFalse(stillRunning);
    }, 60, TimeUnit.SECONDS);
    for (ActionFuture<SnapshotsStatusResponse> status : statuses) {
        var statusResponse = status.get();
        assertThat(statusResponse.getSnapshots(), hasSize(snapshots));
        for (SnapshotStatus snapshot : statusResponse.getSnapshots()) {
            assertThat(snapshot.getState(), allOf(not(SnapshotsInProgress.State.FAILED), not(SnapshotsInProgress.State.ABORTED)));
            for (final var shard : snapshot.getShards()) {
                // A DONE shard must have processed exactly its incremental files/bytes.
                if (shard.getStage() == SnapshotIndexShardStage.DONE) {
                    assertEquals(shard.getStats().getIncrementalFileCount(), shard.getStats().getProcessedFileCount());
                    assertEquals(shard.getStats().getIncrementalSize(), shard.getStats().getProcessedSize());
                }
            }
        }
    }
    for (ActionFuture<GetSnapshotsResponse> get : gets) {
        final List<SnapshotInfo> snapshotInfos = get.get().getSnapshots();
        assertThat(snapshotInfos, hasSize(snapshots));
        for (SnapshotInfo snapshotInfo : snapshotInfos) {
            assertThat(snapshotInfo.state(), oneOf(SnapshotState.IN_PROGRESS, SnapshotState.SUCCESS));
        }
    }
}
/**
 * Verifies that a snapshot-status request with an infinite master-node timeout
 * ({@code TimeValue.MINUS_ONE}) returns full node-level details for an in-progress snapshot
 * instead of timing out while the data nodes are blocked.
 */
public void testInfiniteTimeout() throws Exception {
    createRepository("test-repo", "mock");
    createIndex("test-idx", 1, 0);
    indexRandomDocs("test-idx", 10);
    ensureGreen();
    // Stall the snapshot on all data nodes so it stays in progress for the status call below.
    blockAllDataNodes("test-repo");
    final ActionFuture<CreateSnapshotResponse> snapshotResponseFuture = clusterAdmin().prepareCreateSnapshot(
        TEST_REQUEST_TIMEOUT,
        "test-repo",
        "test-snap"
    ).setWaitForCompletion(true).execute();
    try {
        waitForBlockOnAnyDataNode("test-repo");
        // Make sure that the create-snapshot task completes on master
        assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get().isTimedOut());
        final List<SnapshotStatus> snapshotStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo")
            .setMasterNodeTimeout(TimeValue.MINUS_ONE)
            .get()
            .getSnapshots();
        assertThat(snapshotStatus, hasSize(1));
        assertEquals("test-snap", snapshotStatus.get(0).getSnapshot().getSnapshotId().getName());
        // a timeout of a node-level request results in a successful response but without node-level details, so this checks no timeout:
        assertThat(snapshotStatus.get(0).getShards().get(0).getStats().getTotalFileCount(), greaterThan(0));
        assertFalse(snapshotResponseFuture.isDone());
    } finally {
        // Always unblock so the snapshot can finish even if an assertion above failed.
        unblockAllDataNodes("test-repo");
        snapshotResponseFuture.get(10, TimeUnit.SECONDS);
    }
}
/** Returns the status of the first shard of {@code indexName} within the given snapshot status. */
private static SnapshotIndexShardStatus stateFirstShard(SnapshotStatus snapshotStatus, String indexName) {
    final var indexStatus = snapshotStatus.getIndices().get(indexName);
    return indexStatus.getShards().get(0);
}
/**
 * Fetches the status of a single snapshot, converting an unexpected
 * {@link SnapshotMissingException} into a test failure.
 */
private static SnapshotStatus getSnapshotStatus(String repoName, String snapshotName) {
    final List<SnapshotStatus> statuses;
    try {
        statuses = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName)
            .setSnapshots(snapshotName)
            .get()
            .getSnapshots();
    } catch (SnapshotMissingException e) {
        // The snapshot is expected to exist at this point; a miss is a test bug.
        throw new AssertionError(e);
    }
    return statuses.get(0);
}
/** Index settings for a single-shard, zero-replica index pinned to the given node by name. */
private static Settings singleShardOneNode(String node) {
    final var settingsBuilder = indexSettingsNoReplicas(1);
    settingsBuilder.put("index.routing.allocation.include._name", node);
    return settingsBuilder.build();
}
}
| SnapshotStatusApisIT |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtensionHolder.java | {
"start": 411,
"end": 541
} | interface ____ allows it to be used
* given the way {@link Node} does Guice bindings for plugin components.
* TODO: remove this | that |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityInfo.java | {
"start": 1577,
"end": 1754
} | interface ____
* @param conf configuration object.
* @return TokenInfo instance
*/
public abstract TokenInfo getTokenInfo(Class<?> protocol, Configuration conf);
}
| class |
java | netty__netty | microbench/src/main/java/io/netty/microbenchmark/common/IsValidIpV4Benchmark.java | {
"start": 1188,
"end": 2764
} | class ____ extends AbstractMicrobenchmark {
@Param({ "127.0.0.1", "255.255.255.255", "1.1.1.1", "127.0.0.256", "127.0.0.1.1", "127.0.0", "[2001::1]" })
private String ip;
public static boolean isValidIpV4AddressOld(String value) {
int periods = 0;
int i;
int length = value.length();
if (length > 15) {
return false;
}
char c;
StringBuilder word = new StringBuilder();
for (i = 0; i < length; i++) {
c = value.charAt(i);
if (c == '.') {
periods++;
if (periods > 3) {
return false;
}
if (word.length() == 0) {
return false;
}
if (Integer.parseInt(word.toString()) > 255) {
return false;
}
word.delete(0, word.length());
} else if (!Character.isDigit(c)) {
return false;
} else {
if (word.length() > 2) {
return false;
}
word.append(c);
}
}
if (word.length() == 0 || Integer.parseInt(word.toString()) > 255) {
return false;
}
return periods == 3;
}
// Tests
@Benchmark
public boolean isValidIpV4AddressOld() {
return isValidIpV4AddressOld(ip);
}
@Benchmark
public boolean isValidIpV4AddressNew() {
return NetUtil.isValidIpV4Address(ip);
}
}
| IsValidIpV4Benchmark |
java | elastic__elasticsearch | modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ExplicitProtocolRestIT.java | {
"start": 1297,
"end": 3275
} | class ____ extends AbstractRepositoryS3RestTestCase {
private static final String PREFIX = getIdentifierPrefix("RepositoryS3ExplicitProtocolRestIT");
private static final String BUCKET = PREFIX + "bucket";
private static final String BASE_PATH = PREFIX + "base_path";
private static final String ACCESS_KEY = PREFIX + "access-key";
private static final String SECRET_KEY = PREFIX + "secret-key";
private static final String CLIENT = "explicit_protocol_client";
private static final Supplier<String> regionSupplier = new DynamicRegionSupplier();
private static final S3HttpFixture s3Fixture = new S3HttpFixture(
true,
BUCKET,
BASE_PATH,
fixedAccessKey(ACCESS_KEY, regionSupplier, "s3")
);
private static String getEndpoint() {
final var s3FixtureAddress = s3Fixture.getAddress();
assertThat(s3FixtureAddress, startsWith("http://"));
return s3FixtureAddress.substring("http://".length());
}
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.module("repository-s3")
.systemProperty("aws.region", regionSupplier)
.keystore("s3.client." + CLIENT + ".access_key", ACCESS_KEY)
.keystore("s3.client." + CLIENT + ".secret_key", SECRET_KEY)
.setting("s3.client." + CLIENT + ".endpoint", RepositoryS3ExplicitProtocolRestIT::getEndpoint)
.setting("s3.client." + CLIENT + ".protocol", () -> "http")
.build();
@ClassRule
public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster);
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
@Override
protected String getBucketName() {
return BUCKET;
}
@Override
protected String getBasePath() {
return BASE_PATH;
}
@Override
protected String getClientName() {
return CLIENT;
}
}
| RepositoryS3ExplicitProtocolRestIT |
java | apache__flink | flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java | {
"start": 10242,
"end": 10436
} | class ____ implements IFileSystem {
/**
* The possible write modes. The write mode decides what happens if a file should be created,
* but already exists.
*/
public | FileSystem |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/querycache/EntityWithCollectionReloadCacheTest.java | {
"start": 1600,
"end": 6004
} | class ____ {
@Test
public void test(SessionFactoryScope scope) {
scope.inTransaction( session -> {
for ( String subject : new String[] { "MATH", "BIOL", "CS" } ) {
final List<Tuple> resultList = session.createQuery(
"select d.id, d.course, s " +
"from Demand d inner join d.student s left join fetch s.majors " +
"where d.course.subject.name = :subject",
Tuple.class
)
.setParameter( "subject", subject )
.setCacheable( true )
.getResultList();
assertThat( resultList ).hasSize( subject.equals( "MATH" ) ? 5 : 4 ).allSatisfy( tuple -> {
assertThat( tuple.get( 1, Course.class ).getSubject().getName() ).isEqualTo( subject );
assertThat( tuple.get( 2, Student.class ).getMajors() ).matches( Hibernate::isInitialized );
} );
}
} );
}
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
// Create two majors
final Major m1 = new Major();
m1.setName( "Biology" );
session.persist( m1 );
final Major m2 = new Major();
m2.setName( "Computer Science" );
session.persist( m2 );
// Create three students
final Student s1 = new Student();
s1.setName( "Andrew" );
final StudentMajor sm1 = new StudentMajor();
sm1.setStudent( s1 );
sm1.setMajor( m1 );
sm1.setClassification( "01" );
s1.addToMajors( sm1 );
session.persist( s1 );
final Student s2 = new Student();
s2.setName( "Brian" );
final StudentMajor sm2 = new StudentMajor();
sm2.setStudent( s2 );
sm2.setMajor( m1 );
sm2.setClassification( "02" );
s2.addToMajors( sm2 );
session.persist( s2 );
final Student s3 = new Student();
s3.setName( "Charlie" );
final StudentMajor sm3 = new StudentMajor();
sm3.setStudent( s3 );
sm3.setMajor( m1 );
sm3.setClassification( "01" );
s3.addToMajors( sm3 );
final StudentMajor sm4 = new StudentMajor();
sm4.setStudent( s3 );
sm4.setMajor( m2 );
sm4.setClassification( "02" );
s3.addToMajors( sm4 );
session.persist( s3 );
// Create two subjects
final Subject math = new Subject();
math.setName( "MATH" );
session.persist( math );
final Subject biology = new Subject();
biology.setName( "BIOL" );
session.persist( biology );
final Subject cs = new Subject();
cs.setName( "CS" );
session.persist( cs );
// Create a few courses
final Course c1 = new Course();
c1.setSubject( math );
c1.setNumber( "101" );
session.persist( c1 );
final Course c2 = new Course();
c2.setSubject( math );
c2.setNumber( "201" );
session.persist( c2 );
final Course c3 = new Course();
c3.setSubject( biology );
c3.setNumber( "101" );
session.persist( c3 );
final Course c4 = new Course();
c4.setSubject( biology );
c4.setNumber( "201" );
session.persist( c4 );
final Course c5 = new Course();
c5.setSubject( cs );
c5.setNumber( "101" );
session.persist( c5 );
final Course c6 = new Course();
c6.setSubject( cs );
c6.setNumber( "201" );
session.persist( c6 );
// Create some course demands
final Demand d1 = new Demand();
d1.setCourse( c1 );
d1.setStudent( s1 );
session.persist( d1 );
final Demand d2 = new Demand();
d2.setCourse( c1 );
d2.setStudent( s2 );
session.persist( d2 );
final Demand d3 = new Demand();
d3.setCourse( c2 );
d3.setStudent( s2 );
session.persist( d3 );
final Demand d4 = new Demand();
d4.setCourse( c2 );
d4.setStudent( s3 );
session.persist( d4 );
final Demand d5 = new Demand();
d5.setCourse( c3 );
d5.setStudent( s1 );
session.persist( d5 );
final Demand d6 = new Demand();
d6.setCourse( c3 );
d6.setStudent( s3 );
session.persist( d6 );
final Demand d7 = new Demand();
d7.setCourse( c4 );
d7.setStudent( s1 );
session.persist( d7 );
final Demand d8 = new Demand();
d8.setCourse( c5 );
d8.setStudent( s2 );
session.persist( d8 );
final Demand d9 = new Demand();
d9.setCourse( c6 );
d9.setStudent( s2 );
session.persist( d9 );
final Demand d0 = new Demand();
d0.setCourse( c6 );
d0.setStudent( s3 );
session.persist( d0 );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncateMappedObjects();
}
@Entity( name = "Course" )
static | EntityWithCollectionReloadCacheTest |
java | google__guice | core/test/com/googlecode/guice/BytecodeGenTest.java | {
"start": 2050,
"end": 4379
} | class ____ {
private final ClassLoader systemClassLoader = ClassLoader.getSystemClassLoader();
private final Module interceptorModule =
new AbstractModule() {
@Override
protected void configure() {
bindInterceptor(
any(),
any(),
new MethodInterceptor() {
@Override
public Object invoke(MethodInvocation chain) throws Throwable {
return chain.proceed() + " WORLD";
}
});
}
};
private final Module noopInterceptorModule =
new AbstractModule() {
@Override
protected void configure() {
bindInterceptor(
any(),
any(),
new MethodInterceptor() {
@Override
public Object invoke(MethodInvocation chain) throws Throwable {
return chain.proceed();
}
});
}
};
@Test
public void testPackageVisibility() {
Injector injector = Guice.createInjector(new PackageVisibilityTestModule());
injector.getInstance(PublicUserOfPackagePrivate.class); // This must pass.
}
@Test
public void testInterceptedPackageVisibility() {
// Test relies on package access which CHILD loading doesn't have
assumeTrue(InternalFlags.getCustomClassLoadingOption() != CustomClassLoadingOption.CHILD);
Injector injector = Guice.createInjector(interceptorModule, new PackageVisibilityTestModule());
injector.getInstance(PublicUserOfPackagePrivate.class); // This must pass.
}
@Test
public void testEnhancerNaming() {
// Test relies on package access which CHILD loading doesn't have
assumeTrue(InternalFlags.getCustomClassLoadingOption() != CustomClassLoadingOption.CHILD);
Injector injector = Guice.createInjector(interceptorModule, new PackageVisibilityTestModule());
PublicUserOfPackagePrivate pupp = injector.getInstance(PublicUserOfPackagePrivate.class);
assertTrue(
pupp.getClass()
.getName()
.startsWith(PublicUserOfPackagePrivate.class.getName() + "$$EnhancerByGuice$$"));
}
// TODO(sameb): Figure out how to test FastClass naming tests.
/** Custom URL classloader with basic visibility rules */
static | BytecodeGenTest |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/bind/annotation/ModelAttribute.java | {
"start": 3511,
"end": 4158
} | class ____:
* for example, "orderAddress" for class "mypackage.OrderAddress",
* or "orderAddressList" for "List<mypackage.OrderAddress>".
* @since 4.3
*/
@AliasFor("value")
String name() default "";
/**
* Allows data binding to be disabled directly on an {@code @ModelAttribute}
* method parameter or on the attribute returned from an {@code @ModelAttribute}
* method, both of which would prevent data binding for that attribute.
* <p>By default this is set to {@code true} in which case data binding applies.
* Set this to {@code false} to disable data binding.
* @since 4.3
*/
boolean binding() default true;
}
| name |
java | resilience4j__resilience4j | resilience4j-hedge/src/main/java/io/github/resilience4j/hedge/event/HedgeEvent.java | {
"start": 819,
"end": 968
} | interface ____ {
String getHedgeName();
Type getEventType();
ZonedDateTime getCreationTime();
Duration getDuration();
| HedgeEvent |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/xslt/XsltOutputBytesTest.java | {
"start": 1042,
"end": 1821
} | class ____ extends ContextTestSupport {
@Test
public void testXsltOutput() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("<?xml version=\"1.0\" encoding=\"UTF-8\"?><goodbye>world!</goodbye>");
mock.message(0).body().isInstanceOf(byte[].class);
template.sendBody("direct:start", "<hello>world!</hello>");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").to("xslt:org/apache/camel/component/xslt/example.xsl?output=bytes").to("mock:result");
}
};
}
}
| XsltOutputBytesTest |
java | apache__camel | components/camel-cm-sms/src/main/java/org/apache/camel/component/cm/CMComponent.java | {
"start": 1383,
"end": 2810
} | class ____ extends DefaultComponent {
private Validator validator;
public CMComponent() {
}
public CMComponent(final CamelContext context) {
super(context);
}
@Override
protected Endpoint createEndpoint(final String uri, final String remaining, final Map<String, Object> parameters)
throws Exception {
CMEndpoint endpoint = new CMEndpoint(uri, this);
endpoint.setHost(remaining);
setProperties(endpoint, parameters);
// Validate configuration
final Set<ConstraintViolation<CMConfiguration>> constraintViolations
= getValidator().validate(endpoint.getConfiguration());
if (!constraintViolations.isEmpty()) {
final StringBuilder msg = new StringBuilder();
for (final ConstraintViolation<CMConfiguration> cv : constraintViolations) {
msg.append(String.format("- Invalid value for %s: %s",
cv.getPropertyPath().toString(),
cv.getMessage()));
}
throw new ResolveEndpointFailedException(uri, msg.toString());
}
return endpoint;
}
public Validator getValidator() {
if (validator == null) {
ValidatorFactory factory = Validation.buildDefaultValidatorFactory();
validator = factory.getValidator();
}
return validator;
}
}
| CMComponent |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CSEMaterials.java | {
"start": 905,
"end": 1028
} | class ____ for storing information about key type and corresponding key
* to be used for client side encryption.
*/
public | is |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/availability/ApplicationAvailabilityBeanTests.java | {
"start": 1414,
"end": 4709
} | class ____ {
private AnnotationConfigApplicationContext context;
private ApplicationAvailabilityBean availability;
private MockLog log;
@BeforeEach
void setup() {
this.context = new AnnotationConfigApplicationContext(TestConfiguration.class);
this.availability = this.context.getBean(ApplicationAvailabilityBean.class);
this.log = this.context.getBean(MockLog.class);
}
@Test
void getLivenessStateWhenNoEventHasBeenPublishedReturnsDefaultState() {
assertThat(this.availability.getLivenessState()).isEqualTo(LivenessState.BROKEN);
}
@Test
void getLivenessStateWhenEventHasBeenPublishedReturnsPublishedState() {
AvailabilityChangeEvent.publish(this.context, LivenessState.CORRECT);
assertThat(this.availability.getLivenessState()).isEqualTo(LivenessState.CORRECT);
}
@Test
void getReadinessStateWhenNoEventHasBeenPublishedReturnsDefaultState() {
assertThat(this.availability.getReadinessState()).isEqualTo(ReadinessState.REFUSING_TRAFFIC);
}
@Test
void getReadinessStateWhenEventHasBeenPublishedReturnsPublishedState() {
AvailabilityChangeEvent.publish(this.context, ReadinessState.ACCEPTING_TRAFFIC);
assertThat(this.availability.getReadinessState()).isEqualTo(ReadinessState.ACCEPTING_TRAFFIC);
}
@Test
void getStateWhenNoEventHasBeenPublishedReturnsDefaultState() {
assertThat(this.availability.getState(TestState.class)).isNull();
assertThat(this.availability.getState(TestState.class, TestState.ONE)).isEqualTo(TestState.ONE);
}
@Test
void getStateWhenEventHasBeenPublishedReturnsPublishedState() {
AvailabilityChangeEvent.publish(this.context, TestState.TWO);
assertThat(this.availability.getState(TestState.class)).isEqualTo(TestState.TWO);
assertThat(this.availability.getState(TestState.class, TestState.ONE)).isEqualTo(TestState.TWO);
}
@Test
void getLastChangeEventWhenNoEventHasBeenPublishedReturnsDefaultState() {
assertThat(this.availability.getLastChangeEvent(TestState.class)).isNull();
}
@Test
void getLastChangeEventWhenEventHasBeenPublishedReturnsPublishedState() {
AvailabilityChangeEvent.publish(this.context, TestState.TWO);
assertThat(this.availability.getLastChangeEvent(TestState.class)).isNotNull();
}
@Test
void stateChangesAreLogged() {
AvailabilityChangeEvent.publish(this.context, LivenessState.CORRECT);
assertThat(this.log.getLogged()).contains("Application availability state LivenessState changed to CORRECT");
AvailabilityChangeEvent.publish(this.context, LivenessState.BROKEN);
assertThat(this.log.getLogged())
.contains("Application availability state LivenessState changed from CORRECT to BROKEN");
}
@Test
void stateChangesAreLoggedWithExceptionSource() {
AvailabilityChangeEvent.publish(this.context, new IOException("connection error"), LivenessState.BROKEN);
assertThat(this.log.getLogged()).contains("Application availability state LivenessState changed to BROKEN: "
+ "java.io.IOException: connection error");
}
@Test
void stateChangesAreLoggedWithOtherSource() {
AvailabilityChangeEvent.publish(this.context, new CustomEventSource(), LivenessState.BROKEN);
assertThat(this.log.getLogged()).contains(
"Application availability state LivenessState changed to BROKEN: " + CustomEventSource.class.getName());
}
| ApplicationAvailabilityBeanTests |
java | google__dagger | javatests/dagger/internal/codegen/FullBindingGraphValidationTest.java | {
"start": 1009,
"end": 1322
} | class ____ {
private static final Source MODULE_WITH_ERRORS =
CompilerTests.javaSource(
"test.ModuleWithErrors",
"package test;",
"",
"import dagger.Binds;",
"import dagger.Module;",
"",
"@Module",
" | FullBindingGraphValidationTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureEncodingStep.java | {
"start": 1310,
"end": 2288
} | class ____ implements ErasureCodingStep {
private ECBlock[] inputBlocks;
private ECBlock[] outputBlocks;
private RawErasureEncoder rawEncoder;
/**
* The constructor with all the necessary info.
* @param inputBlocks inputBlocks.
* @param outputBlocks outputBlocks.
* @param rawEncoder rawEncoder.
*/
public ErasureEncodingStep(ECBlock[] inputBlocks, ECBlock[] outputBlocks,
RawErasureEncoder rawEncoder) {
this.inputBlocks = inputBlocks;
this.outputBlocks = outputBlocks;
this.rawEncoder = rawEncoder;
}
@Override
public void performCoding(ECChunk[] inputChunks, ECChunk[] outputChunks)
throws IOException {
rawEncoder.encode(inputChunks, outputChunks);
}
@Override
public ECBlock[] getInputBlocks() {
return inputBlocks;
}
@Override
public ECBlock[] getOutputBlocks() {
return outputBlocks;
}
@Override
public void finish() {
// do nothing
}
}
| ErasureEncodingStep |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java | {
"start": 1563,
"end": 6760
} | class ____ extends ESTestCase {
private ClusterService clusterService;
@Before
public void setUpTests() {
clusterService = mock(ClusterService.class);
}
@Override
protected NamedXContentRegistry xContentRegistry() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
return new NamedXContentRegistry(searchModule.getNamedXContents());
}
public void testCanStartMigration_givenMigrationIsDisabled() {
Settings settings = newSettings(false);
givenClusterSettings(settings);
ClusterState clusterState = mock(ClusterState.class);
MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService);
assertFalse(check.canStartMigration(clusterState));
}
public void testCanStartMigration_givenMissingIndex() {
Settings settings = newSettings(true);
givenClusterSettings(settings);
ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")).build();
MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService);
assertFalse(check.canStartMigration(clusterState));
}
public void testCanStartMigration_givenMlConfigIsAlias() {
Settings settings = newSettings(true);
givenClusterSettings(settings);
// index has been replaced by an alias
String reindexedName = ".reindexed_ml_config";
Metadata.Builder metadata = Metadata.builder();
RoutingTable.Builder routingTable = RoutingTable.builder();
addMlConfigIndex(reindexedName, MlConfigIndex.indexName(), metadata, routingTable);
ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests"))
.metadata(metadata)
.routingTable(routingTable.build())
.build();
MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService);
assertTrue(check.canStartMigration(clusterState));
}
public void testCanStartMigration_givenInactiveShards() {
Settings settings = newSettings(true);
givenClusterSettings(settings);
// index is present but no routing
Metadata.Builder metadata = Metadata.builder();
RoutingTable.Builder routingTable = RoutingTable.builder();
addMlConfigIndex(metadata, routingTable);
ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests"))
.metadata(metadata)
// the difference here is that the routing table that's been created is
// _not_ added to the cluster state, simulating no route to the index
.build();
MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService);
assertFalse(check.canStartMigration(clusterState));
}
private void addMlConfigIndex(Metadata.Builder metadata, RoutingTable.Builder routingTable) {
addMlConfigIndex(MlConfigIndex.indexName(), null, metadata, routingTable);
}
private void addMlConfigIndex(String indexName, String aliasName, Metadata.Builder metadata, RoutingTable.Builder routingTable) {
final String uuid = "_uuid";
IndexMetadata.Builder indexMetadata = IndexMetadata.builder(indexName);
indexMetadata.settings(
Settings.builder()
.put(IndexMetadata.SETTING_INDEX_UUID, uuid)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
);
if (aliasName != null) {
indexMetadata.putAlias(AliasMetadata.builder(aliasName));
}
metadata.put(indexMetadata);
Index index = new Index(indexName, uuid);
ShardId shardId = new ShardId(index, 0);
ShardRouting shardRouting = ShardRouting.newUnassigned(
shardId,
true,
RecoverySource.EmptyStoreRecoverySource.INSTANCE,
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""),
ShardRouting.Role.DEFAULT
);
shardRouting = shardRouting.initialize("node_id", null, 0L);
shardRouting = shardRouting.moveToStarted(ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
routingTable.add(IndexRoutingTable.builder(index).addIndexShard(IndexShardRoutingTable.builder(shardId).addShard(shardRouting)));
}
private void givenClusterSettings(Settings settings) {
ClusterSettings clusterSettings = new ClusterSettings(
settings,
new HashSet<>(Collections.singletonList(MlConfigMigrationEligibilityCheck.ENABLE_CONFIG_MIGRATION))
);
when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
}
private static Settings newSettings(boolean migrationEnabled) {
return Settings.builder().put(MlConfigMigrationEligibilityCheck.ENABLE_CONFIG_MIGRATION.getKey(), migrationEnabled).build();
}
}
| MlConfigMigrationEligibilityCheckTests |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java | {
"start": 2529,
"end": 14248
} | class ____ extends
TimelineStateStore {
public static final Logger LOG =
LoggerFactory.getLogger(LeveldbTimelineStateStore.class);
private static final String DB_NAME = "timeline-state-store.ldb";
private static final FsPermission LEVELDB_DIR_UMASK = FsPermission
.createImmutable((short) 0700);
private static final byte[] TOKEN_ENTRY_PREFIX = bytes("t");
private static final byte[] TOKEN_MASTER_KEY_ENTRY_PREFIX = bytes("k");
private static final byte[] LATEST_SEQUENCE_NUMBER_KEY = bytes("s");
private static final Version CURRENT_VERSION_INFO = Version.newInstance(1, 0);
private static final byte[] TIMELINE_STATE_STORE_VERSION_KEY = bytes("v");
private DB db;
public LeveldbTimelineStateStore() {
super(LeveldbTimelineStateStore.class.getName());
}
@Override
protected void initStorage(Configuration conf) throws IOException {
}
@Override
protected void startStorage() throws IOException {
Options options = new Options();
Path dbPath =
new Path(
getConfig().get(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH),
DB_NAME);
FileSystem localFS = null;
try {
localFS = FileSystem.getLocal(getConfig());
if (!localFS.exists(dbPath)) {
if (!localFS.mkdirs(dbPath)) {
throw new IOException("Couldn't create directory for leveldb " +
"timeline store " + dbPath);
}
localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
}
} finally {
IOUtils.cleanupWithLogger(LOG, localFS);
}
JniDBFactory factory = new JniDBFactory();
try {
options.createIfMissing(false);
db = factory.open(new File(dbPath.toString()), options);
LOG.info("Loading the existing database at th path: " + dbPath.toString());
checkVersion();
} catch (NativeDB.DBException e) {
if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
try {
options.createIfMissing(true);
db = factory.open(new File(dbPath.toString()), options);
LOG.info("Creating a new database at th path: " + dbPath.toString());
storeVersion(CURRENT_VERSION_INFO);
} catch (DBException ex) {
throw new IOException(ex);
}
} else {
throw new IOException(e);
}
} catch (DBException e) {
throw new IOException(e);
}
}
@Override
protected void closeStorage() throws IOException {
IOUtils.cleanupWithLogger(LOG, db);
}
@Override
public TimelineServiceState loadState() throws IOException {
LOG.info("Loading timeline service state from leveldb");
TimelineServiceState state = new TimelineServiceState();
int numKeys = loadTokenMasterKeys(state);
int numTokens = loadTokens(state);
loadLatestSequenceNumber(state);
LOG.info("Loaded " + numKeys + " master keys and " + numTokens
+ " tokens from leveldb, and latest sequence number is "
+ state.getLatestSequenceNumber());
return state;
}
@Override
public void storeToken(TimelineDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException {
DataOutputStream ds = null;
WriteBatch batch = null;
try {
byte[] k = createTokenEntryKey(tokenId.getSequenceNumber());
if (db.get(k) != null) {
throw new IOException(tokenId + " already exists");
}
byte[] v = buildTokenData(tokenId, renewDate);
ByteArrayOutputStream bs = new ByteArrayOutputStream();
ds = new DataOutputStream(bs);
ds.writeInt(tokenId.getSequenceNumber());
batch = db.createWriteBatch();
batch.put(k, v);
batch.put(LATEST_SEQUENCE_NUMBER_KEY, bs.toByteArray());
db.write(batch);
} catch (DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanupWithLogger(LOG, ds);
IOUtils.cleanupWithLogger(LOG, batch);
}
}
@Override
public void updateToken(TimelineDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException {
try {
byte[] k = createTokenEntryKey(tokenId.getSequenceNumber());
if (db.get(k) == null) {
throw new IOException(tokenId + " doesn't exist");
}
byte[] v = buildTokenData(tokenId, renewDate);
db.put(k, v);
} catch (DBException e) {
throw new IOException(e);
}
}
@Override
public void removeToken(TimelineDelegationTokenIdentifier tokenId)
throws IOException {
try {
byte[] key = createTokenEntryKey(tokenId.getSequenceNumber());
db.delete(key);
} catch (DBException e) {
throw new IOException(e);
}
}
@Override
public void storeTokenMasterKey(DelegationKey key) throws IOException {
try {
byte[] k = createTokenMasterKeyEntryKey(key.getKeyId());
if (db.get(k) != null) {
throw new IOException(key + " already exists");
}
byte[] v = buildTokenMasterKeyData(key);
db.put(k, v);
} catch (DBException e) {
throw new IOException(e);
}
}
@Override
public void removeTokenMasterKey(DelegationKey key) throws IOException {
try {
byte[] k = createTokenMasterKeyEntryKey(key.getKeyId());
db.delete(k);
} catch (DBException e) {
throw new IOException(e);
}
}
private static byte[] buildTokenData(
TimelineDelegationTokenIdentifier tokenId, Long renewDate)
throws IOException {
TimelineDelegationTokenIdentifierData data =
new TimelineDelegationTokenIdentifierData(tokenId, renewDate);
return data.toByteArray();
}
private static byte[] buildTokenMasterKeyData(DelegationKey key)
throws IOException {
ByteArrayOutputStream memStream = new ByteArrayOutputStream();
DataOutputStream dataStream = new DataOutputStream(memStream);
try {
key.write(dataStream);
dataStream.close();
} finally {
IOUtils.cleanupWithLogger(LOG, dataStream);
}
return memStream.toByteArray();
}
private static void loadTokenMasterKeyData(TimelineServiceState state,
byte[] keyData)
throws IOException {
DelegationKey key = new DelegationKey();
DataInputStream in =
new DataInputStream(new ByteArrayInputStream(keyData));
try {
key.readFields(in);
} finally {
IOUtils.cleanupWithLogger(LOG, in);
}
state.tokenMasterKeyState.add(key);
}
private static void loadTokenData(TimelineServiceState state, byte[] tokenData)
throws IOException {
TimelineDelegationTokenIdentifierData data =
new TimelineDelegationTokenIdentifierData();
DataInputStream in =
new DataInputStream(new ByteArrayInputStream(tokenData));
try {
data.readFields(in);
} finally {
IOUtils.cleanupWithLogger(LOG, in);
}
state.tokenState.put(data.getTokenIdentifier(), data.getRenewDate());
}
private int loadTokenMasterKeys(TimelineServiceState state)
throws IOException {
byte[] base = KeyBuilder.newInstance().add(TOKEN_MASTER_KEY_ENTRY_PREFIX)
.getBytesForLookup();
int numKeys = 0;
LeveldbIterator iterator = null;
try {
for (iterator = new LeveldbIterator(db), iterator.seek(base);
iterator.hasNext(); iterator.next()) {
byte[] k = iterator.peekNext().getKey();
if (!prefixMatches(base, base.length, k)) {
break;
}
byte[] v = iterator.peekNext().getValue();
loadTokenMasterKeyData(state, v);
++numKeys;
}
} finally {
IOUtils.cleanupWithLogger(LOG, iterator);
}
return numKeys;
}
private int loadTokens(TimelineServiceState state) throws IOException {
byte[] base = KeyBuilder.newInstance().add(TOKEN_ENTRY_PREFIX)
.getBytesForLookup();
int numTokens = 0;
LeveldbIterator iterator = null;
try {
for (iterator = new LeveldbIterator(db), iterator.seek(base);
iterator.hasNext(); iterator.next()) {
byte[] k = iterator.peekNext().getKey();
if (!prefixMatches(base, base.length, k)) {
break;
}
byte[] v = iterator.peekNext().getValue();
loadTokenData(state, v);
++numTokens;
}
} catch (DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanupWithLogger(LOG, iterator);
}
return numTokens;
}
private void loadLatestSequenceNumber(TimelineServiceState state)
throws IOException {
byte[] data = null;
try {
data = db.get(LATEST_SEQUENCE_NUMBER_KEY);
} catch (DBException e) {
throw new IOException(e);
}
if (data != null) {
DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
try {
state.latestSequenceNumber = in.readInt();
} finally {
IOUtils.cleanupWithLogger(LOG, in);
}
}
}
/**
* Creates a domain entity key with column name suffix, of the form
* TOKEN_ENTRY_PREFIX + sequence number.
*/
private static byte[] createTokenEntryKey(int seqNum) throws IOException {
return KeyBuilder.newInstance().add(TOKEN_ENTRY_PREFIX)
.add(Integer.toString(seqNum)).getBytes();
}
/**
* Creates a domain entity key with column name suffix, of the form
* TOKEN_MASTER_KEY_ENTRY_PREFIX + sequence number.
*/
private static byte[] createTokenMasterKeyEntryKey(int keyId)
throws IOException {
return KeyBuilder.newInstance().add(TOKEN_MASTER_KEY_ENTRY_PREFIX)
.add(Integer.toString(keyId)).getBytes();
}
@VisibleForTesting
Version loadVersion() throws IOException {
try {
byte[] data = db.get(TIMELINE_STATE_STORE_VERSION_KEY);
// if version is not stored previously, treat it as CURRENT_VERSION_INFO.
if (data == null || data.length == 0) {
return getCurrentVersion();
}
Version version =
new VersionPBImpl(
YarnServerCommonProtos.VersionProto.parseFrom(data));
return version;
} catch (DBException e) {
throw new IOException(e);
}
}
@VisibleForTesting
void storeVersion(Version state) throws IOException {
byte[] data =
((VersionPBImpl) state).getProto().toByteArray();
try {
db.put(TIMELINE_STATE_STORE_VERSION_KEY, data);
} catch (DBException e) {
throw new IOException(e);
}
}
@VisibleForTesting
Version getCurrentVersion() {
return CURRENT_VERSION_INFO;
}
/**
* 1) Versioning timeline state store:
* major.minor. For e.g. 1.0, 1.1, 1.2...1.25, 2.0 etc.
* 2) Any incompatible change of TS-store is a major upgrade, and any
* compatible change of TS-store is a minor upgrade.
* 3) Within a minor upgrade, say 1.1 to 1.2:
* overwrite the version info and proceed as normal.
* 4) Within a major upgrade, say 1.2 to 2.0:
* throw exception and indicate user to use a separate upgrade tool to
* upgrade timeline store or remove incompatible old state.
*/
private void checkVersion() throws IOException {
Version loadedVersion = loadVersion();
LOG.info("Loaded timeline state store version info " + loadedVersion);
if (loadedVersion.equals(getCurrentVersion())) {
return;
}
if (loadedVersion.isCompatibleTo(getCurrentVersion())) {
LOG.info("Storing timeline state store version info " + getCurrentVersion());
storeVersion(CURRENT_VERSION_INFO);
} else {
String incompatibleMessage =
"Incompatible version for timeline state store: expecting version "
+ getCurrentVersion() + ", but loading version " + loadedVersion;
LOG.error(incompatibleMessage);
throw new IOException(incompatibleMessage);
}
}
}
| LeveldbTimelineStateStore |
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/ManagedChannelImplBuilder.java | {
"start": 7930,
"end": 8185
} | interface ____ Transport implementors to provide a default port to {@link
* io.grpc.NameResolver} for use in cases where the target string doesn't include a port. The
* default implementation returns {@link GrpcUtil#DEFAULT_PORT_SSL}.
*/
public | for |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/builders/HttpSecurityAuthenticationManagerTests.java | {
"start": 3358,
"end": 3873
} | class ____ {
static final AuthenticationManager AUTHENTICATION_MANAGER = mock(AuthenticationManager.class);
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((authz) -> authz
.anyRequest().authenticated()
)
.httpBasic(withDefaults())
.authenticationManager(AUTHENTICATION_MANAGER);
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static | AuthenticationManagerConfig |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/extractor/ScriptFieldTests.java | {
"start": 592,
"end": 2979
} | class ____ extends ESTestCase {
public void testKeyword() {
SearchHit hit = new SearchHitBuilder(42).addField("a_keyword", "bar").build();
ExtractedField field = new ScriptField("a_keyword");
assertThat(field.value(hit, new SourceSupplier(hit)), equalTo(new String[] { "bar" }));
assertThat(field.getName(), equalTo("a_keyword"));
assertThat(field.getSearchField(), equalTo("a_keyword"));
assertThat(field.getTypes().isEmpty(), is(true));
expectThrows(UnsupportedOperationException.class, () -> field.getDocValueFormat());
assertThat(field.getMethod(), equalTo(ExtractedField.Method.SCRIPT_FIELD));
expectThrows(UnsupportedOperationException.class, () -> field.getParentField());
assertThat(field.isMultiField(), is(false));
assertThat(field.supportsFromSource(), is(false));
expectThrows(UnsupportedOperationException.class, () -> field.newFromSource());
}
public void testKeywordArray() {
SearchHit hit = new SearchHitBuilder(42).addField("array", Arrays.asList("a", "b")).build();
ExtractedField field = new ScriptField("array");
assertThat(field.value(hit, new SourceSupplier(hit)), equalTo(new String[] { "a", "b" }));
assertThat(field.getName(), equalTo("array"));
assertThat(field.getSearchField(), equalTo("array"));
assertThat(field.getTypes().isEmpty(), is(true));
expectThrows(UnsupportedOperationException.class, () -> field.getDocValueFormat());
assertThat(field.getMethod(), equalTo(ExtractedField.Method.SCRIPT_FIELD));
expectThrows(UnsupportedOperationException.class, () -> field.getParentField());
assertThat(field.isMultiField(), is(false));
assertThat(field.supportsFromSource(), is(false));
expectThrows(UnsupportedOperationException.class, () -> field.newFromSource());
ExtractedField missing = new DocValueField("missing", Collections.singleton("keyword"));
assertThat(missing.value(hit, new SourceSupplier(hit)), equalTo(new Object[0]));
}
public void testMissing() {
SearchHit hit = new SearchHitBuilder(42).addField("a_keyword", "bar").build();
ExtractedField missing = new ScriptField("missing");
assertThat(missing.value(hit, new SourceSupplier(hit)), equalTo(new Object[0]));
}
}
| ScriptFieldTests |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/bytearrays/ByteArrays_assertDoesNotContain_Test.java | {
"start": 1927,
"end": 6269
} | class ____ extends ByteArraysBaseTest {
@Test
void should_pass_if_actual_does_not_contain_given_values() {
arrays.assertDoesNotContain(someInfo(), actual, arrayOf(12));
}
@Test
void should_pass_if_actual_does_not_contain_given_values_even_if_duplicated() {
arrays.assertDoesNotContain(someInfo(), actual, arrayOf(12, 12, 20));
}
@Test
void should_throw_error_if_array_of_values_to_look_for_is_empty() {
assertThatIllegalArgumentException().isThrownBy(() -> arrays.assertDoesNotContain(someInfo(), actual, emptyArray()))
.withMessage(valuesToLookForIsEmpty());
}
@Test
void should_throw_error_if_array_of_values_to_look_for_is_null() {
assertThatNullPointerException().isThrownBy(() -> arrays.assertDoesNotContain(someInfo(), actual, (byte[]) null))
.withMessage(valuesToLookForIsNull());
}
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertDoesNotContain(someInfo(), null, arrayOf(8)))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_contains_given_values() {
AssertionInfo info = someInfo();
byte[] expected = { 6, 8, 20 };
Throwable error = catchThrowable(() -> arrays.assertDoesNotContain(info, actual, expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldNotContain(actual, expected, newLinkedHashSet((byte) 6, (byte) 8)));
}
@Test
void should_pass_if_actual_does_not_contain_given_values_according_to_custom_comparison_strategy() {
arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(), actual, arrayOf(12));
}
@Test
void should_pass_if_actual_does_not_contain_given_values_even_if_duplicated_according_to_custom_comparison_strategy() {
arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(), actual, arrayOf(12, 12, 20));
}
@Test
void should_throw_error_if_array_of_values_to_look_for_is_empty_whatever_custom_comparison_strategy_is() {
assertThatIllegalArgumentException().isThrownBy(() -> arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(),
actual,
emptyArray()))
.withMessage(valuesToLookForIsEmpty());
}
@Test
void should_throw_error_if_array_of_values_to_look_for_is_null_whatever_custom_comparison_strategy_is() {
assertThatNullPointerException().isThrownBy(() -> arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(),
actual,
(byte[]) null))
.withMessage(valuesToLookForIsNull());
}
@Test
void should_fail_if_actual_is_null_whatever_custom_comparison_strategy_is() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(),
null,
arrayOf(-8)))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_contains_given_values_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
byte[] expected = { 6, -8, 20 };
Throwable error = catchThrowable(() -> arraysWithCustomComparisonStrategy.assertDoesNotContain(info, actual, expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldNotContain(actual, expected, newLinkedHashSet((byte) 6, (byte) -8),
absValueComparisonStrategy));
}
}
| ByteArrays_assertDoesNotContain_Test |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/TaskConfig.java | {
"start": 50987,
"end": 51973
} | class ____
final String className = this.config.getString(classNameKey, null);
if (className == null) {
return null;
}
// instantiate the class
@SuppressWarnings("unchecked")
final Class<TypeSerializerFactory<T>> superClass =
(Class<TypeSerializerFactory<T>>) (Class<?>) TypeSerializerFactory.class;
final TypeSerializerFactory<T> factory;
try {
Class<? extends TypeSerializerFactory<T>> clazz =
Class.forName(className, true, cl).asSubclass(superClass);
factory = InstantiationUtil.instantiate(clazz, superClass);
} catch (ClassNotFoundException cnfex) {
throw new RuntimeException(
"The class '"
+ className
+ "', noted in the configuration as "
+ "serializer factory, could not be found. It is not part of the user code's | name |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/routebuilder/SpringTemplatedRoutePrefixIdTest.java | {
"start": 1226,
"end": 2464
} | class ____ extends SpringTestSupport {
@Override
protected AbstractXmlApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext("org/apache/camel/spring/routebuilder/SpringTemplatedRoutePrefixIdTest.xml");
}
@Test
public void testPrefixId() throws Exception {
assertEquals(2, context.getRouteDefinitions().size());
assertEquals(2, context.getRoutes().size());
assertEquals("Started", context.getRouteController().getRouteStatus("first").name());
assertEquals("Started", context.getRouteController().getRouteStatus("second").name());
assertEquals("true", context.getRoute("first").getProperties().get(Route.TEMPLATE_PROPERTY));
assertEquals("true", context.getRoute("second").getProperties().get(Route.TEMPLATE_PROPERTY));
template.sendBody("direct:one", "Hello Cheese");
template.sendBody("direct:two", "Hello Cake");
assertMockEndpointsSatisfied();
// all nodes should include prefix
Assertions.assertEquals(3, context.getRoute("first").filter("aaa*").size());
Assertions.assertEquals(3, context.getRoute("second").filter("bbb*").size());
}
}
| SpringTemplatedRoutePrefixIdTest |
java | quarkusio__quarkus | extensions/hibernate-search-standalone-elasticsearch/deployment/src/test/java/io/quarkus/hibernate/search/standalone/elasticsearch/test/configuration/NoConfigNoIndexedEntityTest.java | {
"start": 478,
"end": 1117
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class));
// When having no indexed entities, no configuration, no datasource,
// as long as the Hibernate Search beans are not injected anywhere,
// we should still be able to start the application.
@Test
public void testBootSucceedsButHibernateSearchDeactivated() {
// ... but Hibernate Search's beans should not be available.
assertThat(Arc.container().instance(SearchMapping.class).get()).isNull();
}
}
| NoConfigNoIndexedEntityTest |
java | spring-projects__spring-framework | spring-aop/src/test/java/org/springframework/aop/aspectj/annotation/AbstractAspectJAdvisorFactoryTests.java | {
"start": 26882,
"end": 27039
} | class ____ {
Object echo(Object obj) throws Exception {
if (obj instanceof Exception ex) {
throw ex;
}
return obj;
}
}
@Aspect
static | Echo |
java | grpc__grpc-java | netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java | {
"start": 32501,
"end": 33292
} | class ____
implements ProtocolNegotiator.ClientFactory {
private final SslContext sslContext;
private final X509TrustManager x509ExtendedTrustManager;
public TlsProtocolNegotiatorClientFactory(SslContext sslContext,
X509TrustManager x509ExtendedTrustManager) {
this.sslContext = Preconditions.checkNotNull(sslContext, "sslContext");
this.x509ExtendedTrustManager = x509ExtendedTrustManager;
}
@Override public ProtocolNegotiator newNegotiator() {
return tls(sslContext, x509ExtendedTrustManager);
}
@Override public int getDefaultPort() {
return GrpcUtil.DEFAULT_PORT_SSL;
}
}
/** A tuple of (host, port). */
@VisibleForTesting
static final | TlsProtocolNegotiatorClientFactory |
java | netty__netty | transport/src/test/java/io/netty/channel/DefaultChannelPipelineTest.java | {
"start": 74063,
"end": 74527
} | class ____ implements Runnable {
private final ChannelPipeline pipeline;
private final CountDownLatch latch;
TestTask(ChannelPipeline pipeline, CountDownLatch latch) {
this.pipeline = pipeline;
this.latch = latch;
}
@Override
public void run() {
pipeline.addLast(new ChannelInboundHandlerAdapter());
latch.countDown();
}
}
private static final | TestTask |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/util/LoaderUtil.java | {
"start": 8993,
"end": 9164
} | class ____. All checked reflective operation
* exceptions are translated into equivalent {@link LinkageError} classes.
*
* @param className fully qualified | name |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java | {
"start": 3695,
"end": 12879
} | class ____ extends HtmlBlock implements
YarnWebParams {
private final Context nmContext;
private final LogAggregationFileControllerFactory factory;
@Inject
public ContainersLogsBlock(Context context) {
this.nmContext = context;
this.factory = new LogAggregationFileControllerFactory(
context.getConf());
}
@Override
protected void render(Block html) {
String redirectUrl = $(REDIRECT_URL);
if (redirectUrl !=null && redirectUrl.equals("false")) {
html.h1("Failed while trying to construct the redirect url to the log" +
" server. Log Server url may not be configured");
//Intentional fallthrough.
}
ContainerId containerId;
ApplicationId appId;
try {
containerId = ContainerId.fromString($(CONTAINER_ID));
appId = containerId.getApplicationAttemptId().getApplicationId();
} catch (IllegalArgumentException ex) {
html.h1("Invalid container ID: " + $(CONTAINER_ID));
return;
}
LogAggregationFileController fileController = null;
boolean foundAggregatedLogs = false;
try {
fileController = this.factory.getFileControllerForRead(
appId, $(APP_OWNER));
foundAggregatedLogs = true;
} catch (IOException fnf) {
// Do Nothing
}
try {
if ($(CONTAINER_LOG_TYPE).isEmpty()) {
html.h2("Local Logs:");
List<File> logFiles = ContainerLogsUtils.getContainerLogDirs(containerId,
request().getRemoteUser(), nmContext);
printLocalLogFileDirectory(html, logFiles);
if (foundAggregatedLogs) {
// print out the aggregated logs if exists
try {
ContainerLogsRequest logRequest = new ContainerLogsRequest();
logRequest.setAppId(appId);
logRequest.setAppOwner($(APP_OWNER));
logRequest.setContainerId($(CONTAINER_ID));
logRequest.setNodeId(this.nmContext.getNodeId().toString());
List<ContainerLogMeta> containersLogMeta = fileController
.readAggregatedLogsMeta(logRequest);
if (containersLogMeta != null && !containersLogMeta.isEmpty()) {
html.h2("Aggregated Logs:");
printAggregatedLogFileDirectory(html, containersLogMeta);
}
} catch (Exception ex) {
LOG.debug("{}", ex);
}
}
} else {
String aggregationType = $(LOG_AGGREGATION_TYPE);
if (aggregationType == null || aggregationType.isEmpty() ||
aggregationType.trim().toLowerCase().equals(
LOG_AGGREGATION_LOCAL_TYPE)) {
File logFile = ContainerLogsUtils.getContainerLogFile(containerId,
$(CONTAINER_LOG_TYPE), request().getRemoteUser(), nmContext);
printLocalLogFile(html, logFile);
} else if (!LOG_AGGREGATION_LOCAL_TYPE.trim().toLowerCase().equals(
aggregationType) && !LOG_AGGREGATION_REMOTE_TYPE.trim()
.toLowerCase().equals(aggregationType)) {
html.h1("Invalid value for query parameter: "
+ LOG_AGGREGATION_TYPE + ". "
+ "The valid value could be either "
+ LOG_AGGREGATION_LOCAL_TYPE + " or "
+ LOG_AGGREGATION_REMOTE_TYPE + ".");
}
}
} catch (YarnException ex) {
html.h1(ex.getMessage());
} catch (NotFoundException ex) {
html.h1(ex.getMessage());
}
}
private void printLocalLogFile(Block html, File logFile) {
long start =
$("start").isEmpty() ? -4 * 1024 : Long.parseLong($("start"));
start = start < 0 ? logFile.length() + start : start;
start = start < 0 ? 0 : start;
long end =
$("end").isEmpty() ? logFile.length() : Long.parseLong($("end"));
end = end < 0 ? logFile.length() + end : end;
end = end < 0 ? logFile.length() : end;
if (start > end) {
html.h1("Invalid start and end values. Start: [" + start + "]"
+ ", end[" + end + "]");
return;
} else {
FileInputStream logByteStream = null;
try {
logByteStream = ContainerLogsUtils.openLogFileForRead($(CONTAINER_ID),
logFile, nmContext);
} catch (IOException ex) {
html.h1(ex.getMessage());
return;
}
try {
long toRead = end - start;
if (toRead < logFile.length()) {
html.p().__("Showing " + toRead + " bytes. Click ")
.a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER),
logFile.getName(), "?start=0"), "here").
__(" for full log").__();
}
IOUtils.skipFully(logByteStream, start);
InputStreamReader reader =
new InputStreamReader(logByteStream, StandardCharsets.UTF_8);
int bufferSize = 65536;
char[] cbuf = new char[bufferSize];
int len = 0;
int currentToRead = toRead > bufferSize ? bufferSize : (int) toRead;
PRE<Hamlet> pre = html.pre();
while ((len = reader.read(cbuf, 0, currentToRead)) > 0
&& toRead > 0) {
pre.__(new String(cbuf, 0, len));
toRead = toRead - len;
currentToRead = toRead > bufferSize ? bufferSize : (int) toRead;
}
pre.__();
reader.close();
} catch (IOException e) {
LOG.error(
"Exception reading log file " + logFile.getAbsolutePath(), e);
html.h1("Exception reading log file. It might be because log "
+ "file was aggregated : " + logFile.getName());
} finally {
if (logByteStream != null) {
try {
logByteStream.close();
} catch (IOException e) {
// Ignore
}
}
}
}
}
private void printLocalLogFileDirectory(Block html,
List<File> containerLogsDirs) {
// Print out log types in lexical order
Collections.sort(containerLogsDirs);
boolean foundLogFile = false;
for (File containerLogsDir : containerLogsDirs) {
File[] logFiles = containerLogsDir.listFiles();
if (logFiles != null) {
Arrays.sort(logFiles);
for (File logFile : logFiles) {
foundLogFile = true;
html.p()
.a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER),
logFile.getName(), "?start=-4096"),
logFile.getName() + " : Total file length is "
+ logFile.length() + " bytes.").__();
}
}
}
if (!foundLogFile) {
html.h1("No logs available for container " + $(CONTAINER_ID));
return;
}
}
private void printAggregatedLogFileDirectory(Block html,
List<ContainerLogMeta> containersLogMeta) throws ParseException {
List<ContainerLogFileInfo> filesInfo = new ArrayList<>();
for (ContainerLogMeta logMeta : containersLogMeta) {
filesInfo.addAll(logMeta.getContainerLogMeta());
}
//sort the list, so we could list the log file in order.
Collections.sort(filesInfo, new Comparator<ContainerLogFileInfo>() {
@Override
public int compare(ContainerLogFileInfo o1,
ContainerLogFileInfo o2) {
return createAggregatedLogFileName(o1.getFileName(),
o1.getLastModifiedTime()).compareTo(
createAggregatedLogFileName(o2.getFileName(),
o2.getLastModifiedTime()));
}
});
boolean foundLogFile = false;
for (ContainerLogFileInfo fileInfo : filesInfo) {
long timestamp = convertDateToTimeStamp(fileInfo.getLastModifiedTime());
foundLogFile = true;
String fileName = createAggregatedLogFileName(fileInfo.getFileName(),
fileInfo.getLastModifiedTime());
html.p().a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER),
fileInfo.getFileName(),
"?start=-4096&" + LOG_AGGREGATION_TYPE + "="
+ LOG_AGGREGATION_REMOTE_TYPE + "&start.time="
+ (timestamp - 1000) + "&end.time=" + (timestamp + 1000)),
fileName + " : Total file length is "
+ fileInfo.getFileSize() + " bytes.").__();
}
if (!foundLogFile) {
html.h4("No aggregated logs available for container "
+ $(CONTAINER_ID));
return;
}
}
private String createAggregatedLogFileName(String fileName,
String modificationTime) {
return fileName + "_" + modificationTime;
}
private long convertDateToTimeStamp(String dateTime)
throws ParseException {
SimpleDateFormat sdf = new SimpleDateFormat(
"EEE MMM dd HH:mm:ss Z yyyy");
Date d = sdf.parse(dateTime);
Calendar c = Calendar.getInstance();
c.setTime(d);
return c.getTimeInMillis();
}
}
}
| ContainersLogsBlock |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java | {
"start": 681,
"end": 1480
} | class ____ implements Processor {
private final ZoneId zoneId;
BaseDateTimeProcessor(ZoneId zoneId) {
this.zoneId = zoneId;
}
BaseDateTimeProcessor(StreamInput in) throws IOException {
zoneId = SqlStreamInput.asSqlStream(in).zoneId();
}
ZoneId zoneId() {
return zoneId;
}
@Override
public Object process(Object input) {
if (input == null) {
return null;
}
if ((input instanceof ZonedDateTime) == false) {
throw new SqlIllegalArgumentException("A [date], a [time] or a [datetime] is required; received {}", input);
}
return doProcess(((ZonedDateTime) input).withZoneSameInstant(zoneId));
}
abstract Object doProcess(ZonedDateTime dateTime);
}
| BaseDateTimeProcessor |
java | spring-projects__spring-framework | spring-jdbc/src/test/java/org/springframework/jdbc/core/SimplePropertyRowMapperTests.java | {
"start": 1177,
"end": 5028
} | class ____ extends AbstractRowMapperTests {
@Test
void staticQueryWithDataClass() throws Exception {
Mock mock = new Mock();
ConstructorPerson person = mock.getJdbcTemplate().queryForObject(
"select name, age, birth_date, balance from people",
new SimplePropertyRowMapper<>(ConstructorPerson.class));
verifyPerson(person);
mock.verifyClosed();
}
@Test
void staticQueryWithDataClassAndGenerics() throws Exception {
Mock mock = new Mock();
ConstructorPersonWithGenerics person = mock.getJdbcTemplate().queryForObject(
"select name, age, birth_date, balance from people",
new SimplePropertyRowMapper<>(ConstructorPersonWithGenerics.class));
assertThat(person.name()).isEqualTo("Bubba");
assertThat(person.age()).isEqualTo(22L);
assertThat(person.birthDate()).usingComparator(Date::compareTo).isEqualTo(new Date(1221222L));
assertThat(person.balance()).containsExactly(new BigDecimal("1234.56"));
mock.verifyClosed();
}
@Test
void staticQueryWithDataClassAndSetters() throws Exception {
Mock mock = new Mock(MockType.FOUR);
ConstructorPersonWithSetters person = mock.getJdbcTemplate().queryForObject(
"select name, age, birthdate, balance from people",
new SimplePropertyRowMapper<>(ConstructorPersonWithSetters.class));
assertThat(person.name()).isEqualTo("BUBBA");
assertThat(person.age()).isEqualTo(22L);
assertThat(person.birthDate()).usingComparator(Date::compareTo).isEqualTo(new Date(1221222L));
assertThat(person.balance()).isEqualTo(new BigDecimal("1234.56"));
mock.verifyClosed();
}
@Test
void staticQueryWithPlainSetters() throws Exception {
Mock mock = new Mock();
ConcretePerson person = mock.getJdbcTemplate().queryForObject(
"select name, age, birth_date, balance from people",
new SimplePropertyRowMapper<>(ConcretePerson.class));
verifyPerson(person);
mock.verifyClosed();
}
@Test
void staticQueryWithDataRecord() throws Exception {
Mock mock = new Mock();
RecordPerson person = mock.getJdbcTemplate().queryForObject(
"select name, age, birth_date, balance from people",
new SimplePropertyRowMapper<>(RecordPerson.class));
verifyPerson(person);
mock.verifyClosed();
}
@Test
void staticQueryWithDataFields() throws Exception {
Mock mock = new Mock();
FieldPerson person = mock.getJdbcTemplate().queryForObject(
"select name, age, birth_date, balance from people",
new SimplePropertyRowMapper<>(FieldPerson.class));
verifyPerson(person);
mock.verifyClosed();
}
@Test
void staticQueryWithIncompleteDataFields() throws Exception {
Mock mock = new Mock();
IncompleteFieldPerson person = mock.getJdbcTemplate().queryForObject(
"select name, age, birth_date, balance from people",
new SimplePropertyRowMapper<>(IncompleteFieldPerson.class));
verifyPerson(person);
mock.verifyClosed();
}
protected void verifyPerson(RecordPerson person) {
assertThat(person.name()).isEqualTo("Bubba");
assertThat(person.age()).isEqualTo(22L);
assertThat(person.birth_date()).usingComparator(Date::compareTo).isEqualTo(new Date(1221222L));
assertThat(person.balance()).isEqualTo(new BigDecimal("1234.56"));
verifyPersonViaBeanWrapper(person);
}
protected void verifyPerson(FieldPerson person) {
assertThat(person.name).isEqualTo("Bubba");
assertThat(person.age).isEqualTo(22L);
assertThat(person.birth_date).usingComparator(Date::compareTo).isEqualTo(new Date(1221222L));
assertThat(person.balance).isEqualTo(new BigDecimal("1234.56"));
}
protected void verifyPerson(IncompleteFieldPerson person) {
assertThat(person.name).isEqualTo("Bubba");
assertThat(person.age).isEqualTo(22L);
assertThat(person.balance).isEqualTo(new BigDecimal("1234.56"));
}
record RecordPerson(String name, long age, Date birth_date, BigDecimal balance) {
}
static | SimplePropertyRowMapperTests |
java | alibaba__nacos | core/src/test/java/com/alibaba/nacos/core/control/http/NacosHttpTpsFilterTest.java | {
"start": 2085,
"end": 8160
} | class ____ {
NacosHttpTpsFilter nacosHttpTpsFilter;
MockedStatic<ControlManagerCenter> controlManagerCenterMockedStatic;
@Mock
ControllerMethodsCache controllerMethodsCache;
@Mock
private ControlManagerCenter controlManagerCenter;
@Mock
private TpsControlManager tpsControlManager;
@BeforeEach
void before() {
controlManagerCenterMockedStatic = Mockito.mockStatic(ControlManagerCenter.class);
controlManagerCenterMockedStatic.when(() -> ControlManagerCenter.getInstance()).thenReturn(controlManagerCenter);
when(controlManagerCenter.getTpsControlManager()).thenReturn(tpsControlManager);
nacosHttpTpsFilter = new NacosHttpTpsFilter(controllerMethodsCache);
}
@AfterEach
void after() {
controlManagerCenterMockedStatic.close();
}
/**
* test tps check passed ,response is null.
*/
@Test
void testPass() throws Exception {
HttpTpsCheckRequestParserRegistry.register(new HttpTpsCheckRequestParser() {
@Override
public TpsCheckRequest parse(HttpServletRequest httpServletRequest) {
return new TpsCheckRequest();
}
@Override
public String getPointName() {
return "HealthCheck";
}
@Override
public String getName() {
return "HealthCheck";
}
});
TpsCheckResponse tpsCheckResponse = new TpsCheckResponse(true, 200, "success");
when(tpsControlManager.check(any(TpsCheckRequest.class))).thenReturn(tpsCheckResponse);
//mock http tps control method
Method method = HealthCheckRequestHandler.class.getMethod("handle", Request.class, RequestMeta.class);
MockHttpServletRequest httpServletRequest = Mockito.mock(MockHttpServletRequest.class);
MockHttpServletResponse httpServletResponse = Mockito.mock(MockHttpServletResponse.class);
MockFilterChain filterChain = Mockito.mock(MockFilterChain.class);
when(controllerMethodsCache.getMethod(eq(httpServletRequest))).thenReturn(method);
Mockito.doNothing().when(filterChain).doFilter(any(ServletRequest.class), any(ServletResponse.class));
//execute test.
nacosHttpTpsFilter.doFilter(httpServletRequest, httpServletResponse, filterChain);
//verify
Mockito.verify(filterChain, Mockito.times(1)).doFilter(httpServletRequest, httpServletResponse);
}
/**
* test tps check rejected ,response is not null.
*/
@Test
void testRejected() throws Exception {
HttpTpsCheckRequestParserRegistry.register(new HttpTpsCheckRequestParser() {
@Override
public TpsCheckRequest parse(HttpServletRequest httpServletRequest) {
return new TpsCheckRequest();
}
@Override
public String getPointName() {
return "HealthCheck";
}
@Override
public String getName() {
return "HealthCheck";
}
});
TpsCheckResponse tpsCheckResponse = new TpsCheckResponse(false, 5031, "rejected");
when(tpsControlManager.check(any(TpsCheckRequest.class))).thenReturn(tpsCheckResponse);
//mock http tps control method
Method method = HealthCheckRequestHandler.class.getMethod("handle", Request.class, RequestMeta.class);
MockHttpServletRequest httpServletRequest = Mockito.mock(MockHttpServletRequest.class);
MockHttpServletResponse httpServletResponse = Mockito.mock(MockHttpServletResponse.class);
MockFilterChain filterChain = Mockito.mock(MockFilterChain.class);
when(controllerMethodsCache.getMethod(eq(httpServletRequest))).thenReturn(method);
AsyncContextImpl asyncContext = Mockito.mock(AsyncContextImpl.class);
Mockito.when(httpServletRequest.startAsync()).thenReturn(asyncContext);
//execute test.
nacosHttpTpsFilter.doFilter(httpServletRequest, httpServletResponse, filterChain);
//verify
Mockito.verify(filterChain, Mockito.times(0)).doFilter(any(), any());
Thread.sleep(1100L);
Mockito.verify(httpServletResponse, Mockito.times(1)).setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
}
/**
* test tps check exception ,return null skip.
*/
@Test
void testTpsCheckException() throws Exception {
HttpTpsCheckRequestParserRegistry.register(new HttpTpsCheckRequestParser() {
@Override
public TpsCheckRequest parse(HttpServletRequest httpServletRequest) {
return new TpsCheckRequest();
}
@Override
public String getPointName() {
return "HealthCheck";
}
@Override
public String getName() {
return "HealthCheck";
}
});
when(tpsControlManager.check(any(TpsCheckRequest.class))).thenThrow(new RuntimeException("324565"));
//mock http tps control method
Method method = HealthCheckRequestHandler.class.getMethod("handle", Request.class, RequestMeta.class);
HttpServletRequest httpServletRequest = Mockito.mock(HttpServletRequest.class);
HttpServletResponse httpServletResponse = Mockito.mock(HttpServletResponse.class);
MockFilterChain filterChain = Mockito.mock(MockFilterChain.class);
when(controllerMethodsCache.getMethod(eq(httpServletRequest))).thenReturn(method);
//execute test.
nacosHttpTpsFilter.doFilter(httpServletRequest, httpServletResponse, filterChain);
//verify
Mockito.verify(filterChain, Mockito.times(1)).doFilter(httpServletRequest, httpServletResponse);
}
}
| NacosHttpTpsFilterTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/FieldCanBeFinalTest.java | {
"start": 2637,
"end": 3390
} | class ____ {
// BUG: Diagnostic contains: private final int x1
private int x1;
private int x2;
// BUG: Diagnostic contains: private static final int y1
private static int y1;
private static int y2;
{
x1 = 42;
x2 = 42;
}
static {
y1 = 42;
y2 = 42;
}
void mutate() {
x2 = 0;
y2 = 0;
}
}
""")
.doTest();
}
@Test
public void staticSetFromInstance() {
compilationHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | apache__logging-log4j2 | log4j-spring-boot/src/test/java/org/apache/logging/log4j/spring/boot/Log4j2SpringBootInitTest.java | {
"start": 1770,
"end": 2222
} | class ____ {
@Test
void testEnvironment() {
final LoggerContext context = (LoggerContext) LogManager.getContext(false);
final ListAppender app = context.getConfiguration().getAppender("Out");
assertNotNull(app);
assertEquals(1, app.getMessages().size());
assertEquals("prod: Started: log4j-spring-boot", app.getMessages().get(0));
}
@SpringBootApplication
public static | Log4j2SpringBootInitTest |
java | apache__flink | flink-table/flink-table-api-java-bridge/src/main/java/org/apache/flink/table/api/bridge/java/StreamTableEnvironment.java | {
"start": 37610,
"end": 39195
} | class ____ for the format of the path.
* @param dataStream The {@link DataStream} out of which to create the view.
* @param fields The fields expressions to map original fields of the DataStream to the fields
* of the View.
* @param <T> The type of the {@link DataStream}.
* @deprecated Use {@link #createTemporaryView(String, DataStream, Schema)} instead. In most
* cases, {@link #createTemporaryView(String, DataStream)} should already be sufficient. It
* integrates with the new type system and supports all kinds of {@link DataTypes} that the
* table runtime can consume. The semantics might be slightly different for raw and
* structured types.
*/
@Deprecated
<T> void createTemporaryView(String path, DataStream<T> dataStream, Expression... fields);
/**
* Converts the given {@link Table} into an append {@link DataStream} of a specified type.
*
* <p>The {@link Table} must only have insert (append) changes. If the {@link Table} is also
* modified by update or delete changes, the conversion will fail.
*
* <p>The fields of the {@link Table} are mapped to {@link DataStream} fields as follows:
*
* <ul>
* <li>{@link Row} and {@link org.apache.flink.api.java.tuple.Tuple} types: Fields are mapped
* by position, field types must match.
* <li>POJO {@link DataStream} types: Fields are mapped by field name, field types must match.
* </ul>
*
* @param table The {@link Table} to convert.
* @param clazz The | description |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/protocol/header/namesrv/GetKVConfigResponseHeader.java | {
"start": 1062,
"end": 1392
} | class ____ implements CommandCustomHeader {
@CFNullable
private String value;
@Override
public void checkFields() throws RemotingCommandException {
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
}
| GetKVConfigResponseHeader |
java | apache__kafka | storage/src/test/java/org/apache/kafka/tiered/storage/actions/ExpectBrokerInISRAction.java | {
"start": 1306,
"end": 2481
} | class ____ implements TieredStorageTestAction {
private final TopicPartition topicPartition;
private final Integer replicaId;
public ExpectBrokerInISRAction(TopicPartition topicPartition,
Integer replicaId) {
this.topicPartition = topicPartition;
this.replicaId = replicaId;
}
@Override
public void doExecute(TieredStorageTestContext context) throws InterruptedException {
TestUtils.waitForCondition(() -> {
TopicDescription description = describeTopic(context, topicPartition.topic());
TopicPartitionInfo partitionInfo = description.partitions()
.get(topicPartition.partition());
if (partitionInfo != null) {
return partitionInfo.isr().stream().anyMatch(node -> node.id() == replicaId);
}
return false;
}, "Expected broker " + replicaId + " to be in ISR for " + topicPartition);
}
@Override
public void describe(PrintStream output) {
output.printf("expect-broker-in-isr topic-partition: %s broker-id: %d%n", topicPartition, replicaId);
}
}
| ExpectBrokerInISRAction |
java | apache__flink | flink-test-utils-parent/flink-table-filesystem-test-utils/src/test/java/org/apache/flink/table/file/testutils/catalog/TestFileSystemCatalogTest.java | {
"start": 18827,
"end": 19254
} | class ____ implements RefreshHandler {
private final String handlerString;
public TestRefreshHandler(String handlerString) {
this.handlerString = handlerString;
}
@Override
public String asSummaryString() {
return "test refresh handler";
}
public byte[] toBytes() {
return handlerString.getBytes();
}
}
}
| TestRefreshHandler |
java | apache__maven | api/maven-api-cli/src/main/java/org/apache/maven/api/cli/Invoker.java | {
"start": 1519,
"end": 2679
} | interface ____ extends AutoCloseable {
/**
* Invokes the Maven application using the provided {@link InvokerRequest}.
* This method is responsible for executing the Maven command or build
* process based on the information contained in the request.
*
* @param invokerRequest the request containing all necessary information for the invocation
* @return an integer representing the exit code of the invocation (0 typically indicates success)
* @throws InvokerException if an error occurs during the invocation process.
*/
int invoke(@Nonnull InvokerRequest invokerRequest) throws InvokerException;
/**
* Closes and disposes of this {@link Invoker} instance, releasing any resources it may hold.
* This method is called automatically when using try-with-resources statements.
*
* <p>The default implementation does nothing. Subclasses should override this method
* if they need to perform cleanup operations.</p>
*
* @throws InvokerException if an error occurs while closing the {@link Invoker}
*/
@Override
default void close() throws InvokerException {}
}
| Invoker |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/annotation/AnnotationTypeMappings.java | {
"start": 1476,
"end": 1723
} | class ____ designed to be cached so that meta-annotations only need to
* be searched once, regardless of how many times they are actually used.
*
* @author Phillip Webb
* @author Sam Brannen
* @since 5.2
* @see AnnotationTypeMapping
*/
final | is |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/CheckReturnValueTest.java | {
"start": 33971,
"end": 34541
} | class ____ {
void foo() {
makeBarOrThrow();
}
@CanIgnoreReturnValue
String makeBarOrThrow() {
throw new UnsupportedOperationException();
}
}
""")
.doTest();
}
@Test
public void suggestCanIgnoreReturnValueForMethodReference() {
refactoringHelper
.addInputLines(
"Test.java",
"""
import com.google.errorprone.annotations.CheckReturnValue;
@CheckReturnValue
| Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/join/Death.java | {
"start": 630,
"end": 859
} | class ____ implements Serializable {
@Id
@GeneratedValue(strategy = GenerationType.AUTO)
public Integer id;
@Column(name = "death_date")
public Date date;
@Column(table = "ExtendedDeath")
public String howDoesItHappen;
}
| Death |
java | apache__camel | components/camel-weather/src/test/java/org/apache/camel/component/weather/CurrentWeatherConsumerGeolocationProviderTest.java | {
"start": 1069,
"end": 2213
} | class ____ extends CamelTestSupport {
private static final String APPID = "test";
private static final String GEOLOCATION_ACCESS_KEY = "IPSTACK_ACCESS_KEY";
private static final String GEOLOCATION_REQUEST_HOST_IP = "LOCAL_IP";
@Test
public void checkGeolocationProviderConfig() {
WeatherEndpoint endpoint = context().getEndpoint("weather:foo?"
+ "geolocationRequestHostIP=" + GEOLOCATION_REQUEST_HOST_IP
+ "&geolocationAccessKey=" + GEOLOCATION_ACCESS_KEY
+ "&appid=" + APPID,
WeatherEndpoint.class);
WeatherConfiguration configuration = endpoint.getConfiguration();
assertEquals(APPID, configuration.getAppid());
assertEquals(GEOLOCATION_ACCESS_KEY, configuration.getGeolocationAccessKey());
assertEquals(GEOLOCATION_REQUEST_HOST_IP, configuration.getGeolocationRequestHostIP());
assertNotNull(configuration.getGeoLocationProvider());
}
}
| CurrentWeatherConsumerGeolocationProviderTest |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/aot/generate/DefaultGenerationContext.java | {
"start": 2205,
"end": 4633
} | class ____
* @param generatedFiles the generated files
* @param runtimeHints the runtime hints
*/
public DefaultGenerationContext(ClassNameGenerator classNameGenerator, GeneratedFiles generatedFiles,
RuntimeHints runtimeHints) {
this(new GeneratedClasses(classNameGenerator), generatedFiles, runtimeHints);
}
/**
* Create a new {@link DefaultGenerationContext} instance backed by the
* specified items.
* @param generatedClasses the generated classes
* @param generatedFiles the generated files
* @param runtimeHints the runtime hints
*/
DefaultGenerationContext(GeneratedClasses generatedClasses,
GeneratedFiles generatedFiles, RuntimeHints runtimeHints) {
Assert.notNull(generatedClasses, "'generatedClasses' must not be null");
Assert.notNull(generatedFiles, "'generatedFiles' must not be null");
Assert.notNull(runtimeHints, "'runtimeHints' must not be null");
this.sequenceGenerator = new ConcurrentHashMap<>();
this.generatedClasses = generatedClasses;
this.generatedFiles = generatedFiles;
this.runtimeHints = runtimeHints;
}
/**
* Create a new {@link DefaultGenerationContext} instance based on the
* supplied {@code existing} context and feature name.
* @param existing the existing context upon which to base the new one
* @param featureName the feature name to use
* @since 6.0.12
*/
protected DefaultGenerationContext(DefaultGenerationContext existing, String featureName) {
int sequence = existing.sequenceGenerator.computeIfAbsent(featureName, key -> new AtomicInteger()).getAndIncrement();
if (sequence > 0) {
featureName += sequence;
}
this.sequenceGenerator = existing.sequenceGenerator;
this.generatedClasses = existing.generatedClasses.withFeatureNamePrefix(featureName);
this.generatedFiles = existing.generatedFiles;
this.runtimeHints = existing.runtimeHints;
}
@Override
public GeneratedClasses getGeneratedClasses() {
return this.generatedClasses;
}
@Override
public GeneratedFiles getGeneratedFiles() {
return this.generatedFiles;
}
@Override
public RuntimeHints getRuntimeHints() {
return this.runtimeHints;
}
@Override
public DefaultGenerationContext withName(String name) {
return new DefaultGenerationContext(this, name);
}
/**
* Write any generated content out to the generated files.
*/
public void writeGeneratedContent() {
this.generatedClasses.writeTo(this.generatedFiles);
}
}
| names |
java | apache__dubbo | dubbo-config/dubbo-config-api/src/main/java/org/apache/dubbo/config/bootstrap/builders/InternalServiceConfigBuilder.java | {
"start": 2236,
"end": 14035
} | class ____<T> {
private final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(getClass());
private static final Set<String> ACCEPTABLE_PROTOCOL =
Stream.of("dubbo", "tri", "injvm").collect(Collectors.toSet());
private final ApplicationModel applicationModel;
private String protocol;
private Integer port;
private String registryId;
private Class<T> interfaceClass;
private Executor executor;
private T ref;
private String version;
private InternalServiceConfigBuilder(ApplicationModel applicationModel) {
this.applicationModel = applicationModel;
}
public static <T> InternalServiceConfigBuilder<T> newBuilder(ApplicationModel applicationModel) {
return new InternalServiceConfigBuilder<>(applicationModel);
}
public InternalServiceConfigBuilder<T> interfaceClass(Class<T> interfaceClass) {
this.interfaceClass = interfaceClass;
return getThis();
}
public InternalServiceConfigBuilder<T> executor(Executor executor) {
this.executor = executor;
return getThis();
}
public InternalServiceConfigBuilder<T> ref(T ref) {
this.ref = ref;
return getThis();
}
public InternalServiceConfigBuilder<T> registryId(String registryId) {
this.registryId = registryId;
return getThis();
}
public InternalServiceConfigBuilder<T> protocol(String protocol, String key) {
if (StringUtils.isEmpty(protocol) && StringUtils.isNotBlank(key)) {
Map<String, String> params = getApplicationConfig().getParameters();
if (CollectionUtils.isNotEmptyMap(params)) {
protocol = params.get(key);
}
}
this.protocol = StringUtils.isNotEmpty(protocol) ? protocol : getRelatedOrDefaultProtocol();
return getThis();
}
public InternalServiceConfigBuilder<T> version(String version) {
this.version = version;
return getThis();
}
/**
* Get other configured protocol from environment in priority order. If get nothing, use default dubbo.
*
* @return
*/
private String getRelatedOrDefaultProtocol() {
String protocol = "";
// <dubbo:protocol/>
if (StringUtils.isEmpty(protocol)) {
Collection<ProtocolConfig> protocols =
applicationModel.getApplicationConfigManager().getProtocols();
if (CollectionUtils.isNotEmpty(protocols)) {
protocol = protocols.stream()
.map(ProtocolConfig::getName)
.filter(StringUtils::isNotEmpty)
.filter(p -> ACCEPTABLE_PROTOCOL.contains(p))
.findFirst()
.orElse("");
}
}
// <dubbo:provider/>
List<ModuleModel> moduleModels = applicationModel.getPubModuleModels();
if (StringUtils.isEmpty(protocol)) {
Stream<ProviderConfig> providerConfigStream = moduleModels.stream()
.map(ModuleModel::getConfigManager)
.map(ModuleConfigManager::getProviders)
.filter(CollectionUtils::isNotEmpty)
.flatMap(Collection::stream);
protocol = providerConfigStream
.filter((providerConfig) -> providerConfig.getProtocol() != null
|| CollectionUtils.isNotEmpty(providerConfig.getProtocols()))
.map(providerConfig -> {
if (providerConfig.getProtocol() != null
&& StringUtils.isNotEmpty(
providerConfig.getProtocol().getName())) {
return providerConfig.getProtocol().getName();
} else {
return providerConfig.getProtocols().stream()
.map(ProtocolConfig::getName)
.filter(StringUtils::isNotEmpty)
.findFirst()
.orElse("");
}
})
.filter(StringUtils::isNotEmpty)
.filter(p -> ACCEPTABLE_PROTOCOL.contains(p))
.findFirst()
.orElse("");
}
// <dubbo:application/>
if (StringUtils.isEmpty(protocol)) {
protocol = getApplicationConfig().getProtocol();
if (StringUtils.isEmpty(protocol)) {
Map<String, String> params = getApplicationConfig().getParameters();
if (CollectionUtils.isNotEmptyMap(params)) {
protocol = params.get(APPLICATION_PROTOCOL_KEY);
}
}
}
// <dubbo:consumer/>
if (StringUtils.isEmpty(protocol)) {
protocol = moduleModels.stream()
.map(ModuleModel::getConfigManager)
.map(ModuleConfigManager::getConsumers)
.filter(CollectionUtils::isNotEmpty)
.flatMap(Collection::stream)
.map(ConsumerConfig::getProtocol)
.filter(StringUtils::isNotEmpty)
.filter(p -> ACCEPTABLE_PROTOCOL.contains(p))
.findFirst()
.orElse("");
}
return StringUtils.isNotEmpty(protocol) && ACCEPTABLE_PROTOCOL.contains(protocol) ? protocol : DUBBO_PROTOCOL;
}
public InternalServiceConfigBuilder<T> protocol(String protocol) {
this.protocol(protocol, null);
return getThis();
}
public InternalServiceConfigBuilder<T> port(Integer specPort) {
return port(specPort, null);
}
public InternalServiceConfigBuilder<T> port(Integer specPort, String key) {
Assert.notEmptyString(this.protocol, "export protocol is null");
Assert.notNull(this.interfaceClass, "export interfaceClass is null");
if (specPort != null) {
this.port = specPort;
return getThis();
}
Map<String, String> params = getApplicationConfig().getParameters();
if (CollectionUtils.isNotEmptyMap(params) && StringUtils.isNotBlank(key)) {
String rawPort = getApplicationConfig().getParameters().get(key);
if (StringUtils.isNotEmpty(rawPort)) {
specPort = Integer.parseInt(rawPort);
}
}
if (specPort == null || specPort < -1) {
try {
if (logger.isInfoEnabled()) {
logger.info(interfaceClass.getName()
+ "Service Port hasn't been set will use default protocol defined in protocols.");
}
Protocol protocol =
applicationModel.getExtensionLoader(Protocol.class).getExtension(this.protocol);
if (protocol != null && protocol.getServers() != null) {
Iterator<ProtocolServer> it = protocol.getServers().iterator();
// export service may export before normal service export, it.hasNext() will return false.
// so need use specified protocol port.
if (it.hasNext()) {
ProtocolServer server = it.next();
String rawPort = server.getUrl().getParameter(BIND_PORT_KEY);
if (rawPort == null) {
String addr = server.getAddress();
rawPort = addr.substring(addr.indexOf(":") + 1);
}
this.port = Integer.parseInt(rawPort);
} else {
ProtocolConfig specifiedProtocolConfig = getProtocolConfig();
if (specifiedProtocolConfig != null) {
Integer protocolPort = specifiedProtocolConfig.getPort();
if (null != protocolPort && protocolPort != -1) {
this.port = protocolPort;
}
}
}
}
} catch (Exception e) {
logger.error(
INTERNAL_ERROR,
"invalid specified " + port + " port, error " + e.getMessage(),
"",
"Failed to find any valid protocol, will use random port to export service.",
e);
}
}
if (this.port == null) {
this.port = -1;
}
return getThis();
}
private ProtocolConfig getProtocolConfig() {
return applicationModel
.getApplicationConfigManager()
.getProtocol(protocol)
.orElse(null);
}
public ServiceConfig<T> build(Consumer<ServiceConfig<T>> configConsumer) {
ProtocolConfig protocolConfig = new ProtocolConfig();
protocolConfig.setName(this.protocol);
protocolConfig.setPort(this.port);
this.nullAssert();
logger.info("[SERVICE_PUBLISH] [METADATA_REGISTER] Using " + this.protocol + " protocol to export "
+ interfaceClass.getName() + " service on port " + protocolConfig.getPort());
applicationModel
.getApplicationConfigManager()
.getProtocol(this.protocol)
.ifPresent(p -> {
protocolConfig.mergeProtocol(p);
// clear extra protocols possibly merged from global ProtocolConfig
protocolConfig.setExtProtocol(null);
});
ApplicationConfig applicationConfig = getApplicationConfig();
ServiceConfig<T> serviceConfig = new ServiceConfig<>();
serviceConfig.setScopeModel(applicationModel.getInternalModule());
serviceConfig.setApplication(applicationConfig);
RegistryConfig registryConfig = new RegistryConfig();
registryConfig.refresh();
registryConfig.setNeedRefresh(false);
registryConfig.setId(this.registryId);
registryConfig.setAddress("N/A");
registryConfig.setScopeModel(this.applicationModel);
serviceConfig.setRegistry(registryConfig);
serviceConfig.setRegister(false);
serviceConfig.setProtocol(protocolConfig);
serviceConfig.setDelay(0);
serviceConfig.setInterface(interfaceClass);
serviceConfig.setRef(this.ref);
serviceConfig.setGroup(applicationConfig.getName());
if (StringUtils.isNotEmpty(version)) {
serviceConfig.setVersion(version);
} else {
serviceConfig.setVersion("1.0.0");
}
serviceConfig.setFilter("-default");
serviceConfig.setExecutor(executor);
if (null != configConsumer) {
configConsumer.accept(serviceConfig);
}
return serviceConfig;
}
public ServiceConfig<T> build() {
return build(null);
}
private void nullAssert() {
Assert.notNull(port, "export service port is null");
Assert.notNull(protocol, "export service protocol is null");
Assert.notNull(interfaceClass, "export service interfaceClass is null");
Assert.notNull(ref, "export service ref is null");
Assert.notNull(registryId, "export service registryId is null");
}
protected InternalServiceConfigBuilder<T> getThis() {
return this;
}
private ApplicationConfig getApplicationConfig() {
return applicationModel.getApplicationConfigManager().getApplicationOrElseThrow();
}
}
| InternalServiceConfigBuilder |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/instrument/classloading/jboss/JBossLoadTimeWeaver.java | {
"start": 2683,
"end": 5833
} | class ____
* the supplied {@link ClassLoader}.
* @param classLoader the {@code ClassLoader} to delegate to for weaving
*/
public JBossLoadTimeWeaver(@Nullable ClassLoader classLoader) {
Assert.notNull(classLoader, "ClassLoader must not be null");
this.classLoader = classLoader;
try {
Field transformer = ReflectionUtils.findField(classLoader.getClass(), "transformer");
if (transformer == null) {
throw new IllegalArgumentException("Could not find 'transformer' field on JBoss ClassLoader: " +
classLoader.getClass().getName());
}
transformer.setAccessible(true);
Object suggestedTransformer = transformer.get(classLoader);
if (suggestedTransformer.getClass().getName().equals(WRAPPER_TRANSFORMER_CLASS_NAME)) {
Field wrappedTransformer = ReflectionUtils.findField(suggestedTransformer.getClass(), "transformer");
if (wrappedTransformer == null) {
throw new IllegalArgumentException(
"Could not find 'transformer' field on JBoss JLIClassTransformer: " +
suggestedTransformer.getClass().getName());
}
wrappedTransformer.setAccessible(true);
suggestedTransformer = wrappedTransformer.get(suggestedTransformer);
}
Class<?> transformerType = ClassFileTransformer.class;
if (suggestedTransformer.getClass().getName().equals(LEGACY_DELEGATING_TRANSFORMER_CLASS_NAME)) {
this.adaptTransformer = (t -> t);
}
else if (suggestedTransformer.getClass().getName().equals(DELEGATING_TRANSFORMER_CLASS_NAME)) {
transformerType = classLoader.loadClass(CLASS_TRANSFORMER_CLASS_NAME);
Constructor<?> adaptedTransformer = classLoader.loadClass(WRAPPER_TRANSFORMER_CLASS_NAME)
.getConstructor(ClassFileTransformer.class);
this.adaptTransformer = adaptedTransformer::newInstance;
}
else {
throw new IllegalStateException(
"Transformer not of expected type DelegatingClass(File)Transformer: " +
suggestedTransformer.getClass().getName());
}
this.delegatingTransformer = suggestedTransformer;
Method addTransformer = ReflectionUtils.findMethod(this.delegatingTransformer.getClass(),
"addTransformer", transformerType);
if (addTransformer == null) {
throw new IllegalArgumentException(
"Could not find 'addTransformer' method on JBoss DelegatingClass(File)Transformer: " +
this.delegatingTransformer.getClass().getName());
}
addTransformer.setAccessible(true);
this.addTransformer = addTransformer;
}
catch (Throwable ex) {
throw new IllegalStateException("Could not initialize JBoss LoadTimeWeaver", ex);
}
}
@Override
public void addTransformer(ClassFileTransformer transformer) {
try {
this.addTransformer.invoke(this.delegatingTransformer, this.adaptTransformer.apply(transformer));
}
catch (Throwable ex) {
throw new IllegalStateException("Could not add transformer on JBoss ClassLoader: " + this.classLoader, ex);
}
}
@Override
public ClassLoader getInstrumentableClassLoader() {
return this.classLoader;
}
@Override
public ClassLoader getThrowawayClassLoader() {
return new SimpleThrowawayClassLoader(getInstrumentableClassLoader());
}
}
| using |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/SqlResultSetMappingsJpaAnnotation.java | {
"start": 727,
"end": 1843
} | class ____
implements SqlResultSetMappings, RepeatableContainer<SqlResultSetMapping> {
private SqlResultSetMapping[] value;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public SqlResultSetMappingsJpaAnnotation(ModelsContext modelContext) {
}
/**
* Used in creating annotation instances from JDK variant
*/
public SqlResultSetMappingsJpaAnnotation(SqlResultSetMappings annotation, ModelsContext modelContext) {
this.value = extractJdkValue( annotation, JpaAnnotations.SQL_RESULT_SET_MAPPINGS, "value", modelContext );
}
/**
* Used in creating annotation instances from Jandex variant
*/
public SqlResultSetMappingsJpaAnnotation(
Map<String, Object> attributeValues,
ModelsContext modelContext) {
this.value = (SqlResultSetMapping[]) attributeValues.get( "value" );
}
@Override
public Class<? extends Annotation> annotationType() {
return SqlResultSetMappings.class;
}
@Override
public SqlResultSetMapping[] value() {
return value;
}
public void value(SqlResultSetMapping[] value) {
this.value = value;
}
}
| SqlResultSetMappingsJpaAnnotation |
java | quarkusio__quarkus | extensions/virtual-threads/runtime/src/test/java/io/quarkus/virtual/threads/VirtualThreadExecutorSupplierTest.java | {
"start": 863,
"end": 6788
} | class ____ {
@BeforeEach
void configRecorder() {
VirtualThreadsRecorder.config = new SmallRyeConfigBuilder()
.addDiscoveredConverters()
.withMapping(VirtualThreadsConfig.class)
.build().getConfigMapping(VirtualThreadsConfig.class);
}
@Test
@EnabledForJreRange(min = JRE.JAVA_20, disabledReason = "Virtual Threads are a preview feature starting from Java 20")
void virtualThreadCustomScheduler()
throws ClassNotFoundException, InvocationTargetException, IllegalAccessException, NoSuchMethodException {
Executor executor = VirtualThreadsRecorder.newVirtualThreadPerTaskExecutorWithName("vthread-");
var assertSubscriber = Uni.createFrom().emitter(e -> {
assertThat(Thread.currentThread().getName()).isNotEmpty()
.startsWith("vthread-");
assertThatItRunsOnVirtualThread();
e.complete(null);
}).runSubscriptionOn(executor)
.subscribe().withSubscriber(UniAssertSubscriber.create());
assertSubscriber.awaitItem(Duration.ofSeconds(1)).assertCompleted();
}
@Test
@EnabledForJreRange(min = JRE.JAVA_20, disabledReason = "Virtual Threads are a preview feature starting from Java 20")
void execute() throws ClassNotFoundException, InvocationTargetException, NoSuchMethodException, IllegalAccessException {
Executor executor = VirtualThreadsRecorder.newVirtualThreadPerTaskExecutorWithName(null);
var assertSubscriber = Uni.createFrom().emitter(e -> {
assertThat(Thread.currentThread().getName()).isEmpty();
assertThatItRunsOnVirtualThread();
e.complete(null);
}).runSubscriptionOn(executor)
.subscribe().withSubscriber(UniAssertSubscriber.create());
assertSubscriber.awaitItem(Duration.ofSeconds(1)).assertCompleted();
}
@Test
@EnabledForJreRange(min = JRE.JAVA_20, disabledReason = "Virtual Threads are a preview feature starting from Java 20")
void executePropagatesVertxContext() throws ExecutionException, InterruptedException {
ExecutorService executorService = VirtualThreadsRecorder.getCurrent();
Vertx vertx = Vertx.vertx();
CompletableFuture<Context> future = new CompletableFuture<>();
vertx.executeBlocking(() -> {
executorService.execute(() -> {
assertThatItRunsOnVirtualThread();
future.complete(Vertx.currentContext());
});
return null;
}).toCompletionStage().toCompletableFuture().get();
assertThat(future.get()).isNotNull();
}
@Test
@EnabledForJreRange(min = JRE.JAVA_20, disabledReason = "Virtual Threads are a preview feature starting from Java 20")
void executePropagatesVertxContextMutiny() {
ExecutorService executorService = VirtualThreadsRecorder.getCurrent();
Vertx vertx = Vertx.vertx();
var assertSubscriber = Uni.createFrom().voidItem()
.runSubscriptionOn(command -> vertx.executeBlocking(() -> {
command.run();
return null;
}))
.emitOn(executorService)
.map(x -> {
assertThatItRunsOnVirtualThread();
return Vertx.currentContext();
})
.subscribe().withSubscriber(UniAssertSubscriber.create());
assertThat(assertSubscriber.awaitItem().assertCompleted().getItem()).isNotNull();
}
@Test
@EnabledForJreRange(min = JRE.JAVA_20, disabledReason = "Virtual Threads are a preview feature starting from Java 20")
void submitPropagatesVertxContext() throws ExecutionException, InterruptedException {
ExecutorService executorService = VirtualThreadsRecorder.getCurrent();
Vertx vertx = Vertx.vertx();
CompletableFuture<Context> future = new CompletableFuture<>();
vertx.executeBlocking(() -> {
executorService.submit(() -> {
assertThatItRunsOnVirtualThread();
future.complete(Vertx.currentContext());
});
return null;
}).toCompletionStage().toCompletableFuture().get();
assertThat(future.get()).isNotNull();
}
@Test
@EnabledForJreRange(min = JRE.JAVA_20, disabledReason = "Virtual Threads are a preview feature starting from Java 20")
void invokeAllPropagatesVertxContext() throws ExecutionException, InterruptedException {
ExecutorService executorService = VirtualThreadsRecorder.getCurrent();
Vertx vertx = Vertx.vertx();
List<Future<Context>> futures = vertx.executeBlocking(() -> {
return executorService.invokeAll(List.of((Callable<Context>) () -> {
assertThatItRunsOnVirtualThread();
return Vertx.currentContext();
}, (Callable<Context>) () -> {
assertThatItRunsOnVirtualThread();
return Vertx.currentContext();
}));
}).toCompletionStage().toCompletableFuture().get();
assertThat(futures).allSatisfy(contextFuture -> assertThat(contextFuture.get()).isNotNull());
}
public static void assertThatItRunsOnVirtualThread() {
// We cannot depend on a Java 20.
try {
Method isVirtual = Thread.class.getMethod("isVirtual");
isVirtual.setAccessible(true);
boolean virtual = (Boolean) isVirtual.invoke(Thread.currentThread());
if (!virtual) {
throw new AssertionError("Thread " + Thread.currentThread() + " is not a virtual thread");
}
} catch (Exception e) {
throw new AssertionError(
"Thread " + Thread.currentThread() + " is not a virtual thread - cannot invoke Thread.isVirtual()", e);
}
}
}
| VirtualThreadExecutorSupplierTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/util/IterableUtil_toArray_Test.java | {
"start": 1014,
"end": 2113
} | class ____ {
private final ArrayList<String> values = newArrayList("one", "two");
@Test
void should_return_null_when_given_iterable_is_null() {
assertThat(IterableUtil.toArray((Iterable<String>) null)).isNull();
assertThat(IterableUtil.toArray(null, Object.class)).isNull();
}
@Test
void should_return_an_object_array_with_given_iterable_elements() {
Object[] objects = IterableUtil.toArray(values);
assertThat(objects).containsExactly("one", "two");
String[] strings = IterableUtil.toArray(values, String.class);
assertThat(strings).containsExactly("one", "two");
}
@Test
void should_return_empty_array_when_given_iterable_is_empty() {
assertThat(IterableUtil.toArray(Collections.<Object> emptyList())).isEmpty();
assertThat(IterableUtil.toArray(emptyList(), Object.class)).isEmpty();
}
@Test
void should_return_an_array_of_given_iterable_type_with_given_iterable_elements() {
CharSequence[] result = IterableUtil.toArray(values, CharSequence.class);
assertThat(result).containsExactly("one", "two");
}
}
| IterableUtil_toArray_Test |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/server/reactive/AbstractListenerReadPublisher.java | {
"start": 1849,
"end": 8423
} | class ____<T> implements Publisher<T> {
/**
* Special logger for debugging Reactive Streams signals.
* @see LogDelegateFactory#getHiddenLog(Class)
* @see AbstractListenerWriteProcessor#rsWriteLogger
* @see AbstractListenerWriteFlushProcessor#rsWriteFlushLogger
* @see WriteResultPublisher#rsWriteResultLogger
*/
protected static Log rsReadLogger = LogDelegateFactory.getHiddenLog(AbstractListenerReadPublisher.class);
static final DataBuffer EMPTY_BUFFER = DefaultDataBufferFactory.sharedInstance.allocateBuffer(0);
private final AtomicReference<State> state = new AtomicReference<>(State.UNSUBSCRIBED);
private volatile long demand;
@SuppressWarnings("rawtypes")
private static final AtomicLongFieldUpdater<AbstractListenerReadPublisher> DEMAND_FIELD_UPDATER =
AtomicLongFieldUpdater.newUpdater(AbstractListenerReadPublisher.class, "demand");
private volatile @Nullable Subscriber<? super T> subscriber;
/** Flag to defer transition to COMPLETED briefly while SUBSCRIBING or READING. */
private volatile boolean completionPending;
/** Flag to defer transition to COMPLETED briefly while SUBSCRIBING or READING. */
private volatile @Nullable Throwable errorPending;
private final String logPrefix;
public AbstractListenerReadPublisher() {
this("");
}
/**
* Create an instance with the given log prefix.
* @since 5.1
*/
public AbstractListenerReadPublisher(String logPrefix) {
this.logPrefix = logPrefix;
}
/**
* Return the configured log message prefix.
* @since 5.1
*/
public String getLogPrefix() {
return this.logPrefix;
}
// Publisher implementation...
@Override
public void subscribe(Subscriber<? super T> subscriber) {
this.state.get().subscribe(this, subscriber);
}
// Async I/O notification methods...
/**
* Invoked when reading is possible, either in the same thread after a check
* via {@link #checkOnDataAvailable()}, or as a callback from the underlying
* container.
*/
public final void onDataAvailable() {
rsReadLogger.trace(getLogPrefix() + "onDataAvailable");
this.state.get().onDataAvailable(this);
}
/**
* Subclasses can call this method to signal onComplete, delegating a
* notification from the container when all data has been read.
*/
public void onAllDataRead() {
State state = this.state.get();
if (rsReadLogger.isTraceEnabled()) {
rsReadLogger.trace(getLogPrefix() + "onAllDataRead [" + state + "]");
}
state.onAllDataRead(this);
}
/**
* Subclasses can call this to signal onError, delegating a
* notification from the container for an error.
*/
public final void onError(Throwable ex) {
State state = this.state.get();
if (rsReadLogger.isTraceEnabled()) {
rsReadLogger.trace(getLogPrefix() + "onError: " + ex + " [" + state + "]");
}
state.onError(this, ex);
}
// Read API methods to be implemented or template methods to override...
/**
* Check if data is available and either call {@link #onDataAvailable()}
* immediately or schedule a notification.
*/
protected abstract void checkOnDataAvailable();
/**
* Read once from the input, if possible.
* @return the item that was read; or {@code null}
*/
protected abstract @Nullable T read() throws IOException;
/**
* Invoked when reading is paused due to a lack of demand.
* <p><strong>Note:</strong> This method is guaranteed not to compete with
* {@link #checkOnDataAvailable()} so it can be used to safely suspend
* reading, if the underlying API supports it, i.e. without competing with
* an implicit call to resume via {@code checkOnDataAvailable()}.
* @since 5.0.2
*/
protected abstract void readingPaused();
/**
* Invoked after an I/O read error from the underlying server or after a
* cancellation signal from the downstream consumer to allow subclasses
* to discard any current cached data they might have.
* @since 5.0.11
*/
protected abstract void discardData();
// Private methods for use in State...
/**
* Read and publish data one by one until there are no more items
* to read (i.e. input queue drained), or there is no more demand.
* @return {@code true} if there is demand but no more to read, or
* {@code false} if there is more to read but lack of demand.
*/
private boolean readAndPublish() throws IOException {
long r;
while ((r = this.demand) > 0 && (this.state.get() != State.COMPLETED)) {
T data = read();
if (data == EMPTY_BUFFER) {
if (rsReadLogger.isTraceEnabled()) {
rsReadLogger.trace(getLogPrefix() + "0 bytes read, trying again");
}
}
else if (data != null) {
if (r != Long.MAX_VALUE) {
DEMAND_FIELD_UPDATER.addAndGet(this, -1L);
}
Subscriber<? super T> subscriber = this.subscriber;
Assert.state(subscriber != null, "No subscriber");
if (rsReadLogger.isTraceEnabled()) {
rsReadLogger.trace(getLogPrefix() + "Publishing " + data.getClass().getSimpleName());
}
subscriber.onNext(data);
}
else {
if (rsReadLogger.isTraceEnabled()) {
rsReadLogger.trace(getLogPrefix() + "No more to read");
}
return true;
}
}
return false;
}
private boolean changeState(State oldState, State newState) {
boolean result = this.state.compareAndSet(oldState, newState);
if (result && rsReadLogger.isTraceEnabled()) {
rsReadLogger.trace(getLogPrefix() + oldState + " -> " + newState);
}
return result;
}
private void changeToDemandState(State oldState) {
if (changeState(oldState, State.DEMAND)) {
// Protect from infinite recursion in Undertow, where we can't check if data
// is available, so all we can do is to try to read.
// Generally, no need to check if we just came out of readAndPublish()...
if (oldState != State.READING) {
checkOnDataAvailable();
}
}
}
private boolean handlePendingCompletionOrError() {
State state = this.state.get();
if (state == State.DEMAND || state == State.NO_DEMAND) {
if (this.completionPending) {
rsReadLogger.trace(getLogPrefix() + "Processing pending completion");
this.state.get().onAllDataRead(this);
return true;
}
Throwable ex = this.errorPending;
if (ex != null) {
if (rsReadLogger.isTraceEnabled()) {
rsReadLogger.trace(getLogPrefix() + "Processing pending completion with error: " + ex);
}
this.state.get().onError(this, ex);
return true;
}
}
return false;
}
private Subscription createSubscription() {
return new ReadSubscription();
}
/**
* Subscription that delegates signals to State.
*/
private final | AbstractListenerReadPublisher |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java | {
"start": 1582,
"end": 3438
} | enum ____ implements ColumnPrefix<AppToFlowTable> {
/**
* The flow name.
*/
FLOW_NAME(AppToFlowColumnFamily.MAPPING, "flow_name"),
/**
* The flow run ID.
*/
FLOW_RUN_ID(AppToFlowColumnFamily.MAPPING, "flow_run_id"),
/**
* The user.
*/
USER_ID(AppToFlowColumnFamily.MAPPING, "user_id");
private final ColumnFamily<AppToFlowTable> columnFamily;
private final String columnPrefix;
private final byte[] columnPrefixBytes;
private final ValueConverter valueConverter;
AppToFlowColumnPrefix(ColumnFamily<AppToFlowTable> columnFamily,
String columnPrefix) {
this.columnFamily = columnFamily;
this.columnPrefix = columnPrefix;
if (columnPrefix == null) {
this.columnPrefixBytes = null;
} else {
// Future-proof by ensuring the right column prefix hygiene.
this.columnPrefixBytes =
Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
}
this.valueConverter = GenericConverter.getInstance();
}
@Override
public byte[] getColumnPrefixBytes(String qualifierPrefix) {
return ColumnHelper.getColumnQualifier(
columnPrefixBytes, qualifierPrefix);
}
@Override
public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
return ColumnHelper.getColumnQualifier(
columnPrefixBytes, qualifierPrefix);
}
@Override
public byte[] getColumnPrefixInBytes() {
return columnPrefixBytes != null ? columnPrefixBytes.clone() : null;
}
@Override
public byte[] getColumnFamilyBytes() {
return columnFamily.getBytes();
}
@Override
public ValueConverter getValueConverter() {
return valueConverter;
}
@Override
public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
return attributes;
}
@Override
public boolean supplementCellTimeStamp() {
return false;
}
}
| AppToFlowColumnPrefix |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/models/xml/internal/QueryProcessing.java | {
"start": 3497,
"end": 16740
} | class ____ {
public static void applyNamedQueries(
JaxbEntityImpl jaxbEntity,
MutableClassDetails classDetails,
XmlDocumentContext xmlDocumentContext) {
if ( CollectionHelper.isEmpty( jaxbEntity.getNamedQueries() ) ) {
return;
}
final ModelsContext modelBuildingContext = xmlDocumentContext.getModelBuildingContext();
List<NamedQueryAnnotation> namedHqlQueryList = null;
List<NamedQueryJpaAnnotation> namedJpqlQueryList = null;
for ( int i = 0; i < jaxbEntity.getNamedQueries().size(); i++ ) {
final JaxbNamedHqlQueryImpl jaxbNamedQuery = jaxbEntity.getNamedQueries().get( i );
if ( CollectionHelper.isNotEmpty( jaxbNamedQuery.getHints() ) ) {
// treat this as a Jakarta Persistence named-query
if ( namedJpqlQueryList == null ) {
namedJpqlQueryList = new ArrayList<>();
}
final NamedQueryJpaAnnotation namedJpqlQuery = JpaAnnotations.NAMED_QUERY.createUsage( modelBuildingContext );
namedJpqlQueryList.add( namedJpqlQuery );
namedJpqlQuery.apply( jaxbNamedQuery, xmlDocumentContext );
}
else {
// treat this as a named HQL query
if ( namedHqlQueryList == null ) {
namedHqlQueryList = new ArrayList<>();
}
final NamedQueryAnnotation namedQuery = HibernateAnnotations.NAMED_QUERY.createUsage( xmlDocumentContext.getModelBuildingContext() );
namedHqlQueryList.add( namedQuery );
namedQuery.apply( jaxbNamedQuery, xmlDocumentContext );
}
}
if ( namedJpqlQueryList != null ) {
final NamedQueriesJpaAnnotation namedJpqlQueries = (NamedQueriesJpaAnnotation) classDetails.replaceAnnotationUsage(
JpaAnnotations.NAMED_QUERY,
JpaAnnotations.NAMED_QUERIES,
modelBuildingContext
);
namedJpqlQueries.value( namedJpqlQueryList.toArray( jakarta.persistence.NamedQuery[]::new ) );
}
if ( namedHqlQueryList != null ) {
final NamedQueriesAnnotation namedQueries = (NamedQueriesAnnotation) classDetails.replaceAnnotationUsage(
HibernateAnnotations.NAMED_QUERY,
HibernateAnnotations.NAMED_QUERIES,
modelBuildingContext
);
namedQueries.value( namedHqlQueryList.toArray( NamedQuery[]::new ) );
}
}
public static FlushModeType interpretFlushMode(FlushMode flushMode) {
return switch ( flushMode ) {
case AUTO -> FlushModeType.AUTO;
case ALWAYS -> FlushModeType.ALWAYS;
case COMMIT -> FlushModeType.COMMIT;
case MANUAL -> FlushModeType.MANUAL;
};
}
public static final QueryHint[] NO_HINTS = new QueryHint[0];
public static QueryHint[] collectQueryHints(List<JaxbQueryHintImpl> jaxbHints, XmlDocumentContext xmlDocumentContext) {
if ( CollectionHelper.isEmpty( jaxbHints ) ) {
return NO_HINTS;
}
final QueryHint[] hints = new QueryHint[jaxbHints.size()];
final ModelsContext modelBuildingContext = xmlDocumentContext.getModelBuildingContext();
for ( int i = 0; i < jaxbHints.size(); i++ ) {
final QueryHintJpaAnnotation queryHintUsage = JpaAnnotations.QUERY_HINT.createUsage( modelBuildingContext );
hints[i] = queryHintUsage;
final JaxbQueryHint jaxbHint = jaxbHints.get(i);
queryHintUsage.name( jaxbHint.getName() );
queryHintUsage.value( jaxbHint.getValue() );
}
return hints;
}
public static void applyNamedNativeQueries(
JaxbEntityImpl jaxbEntity,
MutableClassDetails classDetails,
JaxbEntityMappingsImpl jaxbRoot,
XmlDocumentContext xmlDocumentContext) {
if ( CollectionHelper.isEmpty( jaxbEntity.getNamedNativeQueries() ) ) {
return;
}
final ModelsContext modelBuildingContext = xmlDocumentContext.getModelBuildingContext();
List<NamedNativeQueryAnnotation> namedQueryList = null;
List<NamedNativeQueryJpaAnnotation> namedJpaQueryList = null;
for ( int i = 0; i < jaxbEntity.getNamedNativeQueries().size(); i++ ) {
final JaxbNamedNativeQueryImpl jaxbNamedQuery = jaxbEntity.getNamedNativeQueries().get( i );
if ( needsJpaNativeQuery( jaxbNamedQuery ) ) {
// @jakarta.persistence.NamedNativeQuery
if ( namedJpaQueryList == null ) {
namedJpaQueryList = new ArrayList<>();
}
final NamedNativeQueryJpaAnnotation namedQuery = JpaAnnotations.NAMED_NATIVE_QUERY.createUsage( modelBuildingContext );
namedJpaQueryList.add( namedQuery );
namedQuery.apply( jaxbNamedQuery, xmlDocumentContext );
}
else {
// @org.hibernate.annotations.NamedNativeQuery
if ( namedQueryList == null ) {
namedQueryList = new ArrayList<>();
}
final NamedNativeQueryAnnotation namedQuery = HibernateAnnotations.NAMED_NATIVE_QUERY.createUsage( modelBuildingContext );
namedQueryList.add( namedQuery );
namedQuery.apply( jaxbNamedQuery, xmlDocumentContext );
}
}
if ( namedJpaQueryList != null ) {
final NamedNativeQueriesJpaAnnotation namedQueriesUsage = (NamedNativeQueriesJpaAnnotation) classDetails.replaceAnnotationUsage(
JpaAnnotations.NAMED_NATIVE_QUERY,
JpaAnnotations.NAMED_NATIVE_QUERIES,
modelBuildingContext
);
namedQueriesUsage.value( namedJpaQueryList.toArray( jakarta.persistence.NamedNativeQuery[]::new ) );
}
if ( namedQueryList != null ) {
final NamedNativeQueriesAnnotation namedQueriesUsage = (NamedNativeQueriesAnnotation) classDetails.replaceAnnotationUsage(
HibernateAnnotations.NAMED_NATIVE_QUERY,
HibernateAnnotations.NAMED_NATIVE_QUERIES,
modelBuildingContext
);
namedQueriesUsage.value( namedQueryList.toArray(NamedNativeQuery[]::new ) );
}
}
private static boolean needsJpaNativeQuery(JaxbNamedNativeQueryImpl jaxbNamedQuery) {
return CollectionHelper.isNotEmpty( jaxbNamedQuery.getHints() )
|| CollectionHelper.isNotEmpty( jaxbNamedQuery.getColumnResult() )
|| CollectionHelper.isNotEmpty( jaxbNamedQuery.getConstructorResult() )
|| CollectionHelper.isNotEmpty( jaxbNamedQuery.getEntityResult() );
}
private static final ColumnResult[] NO_COLUMN_RESULTS = new ColumnResult[0];
public static ColumnResult[] extractColumnResults(
List<JaxbColumnResultImpl> jaxbColumnResultList,
XmlDocumentContext xmlDocumentContext) {
if ( CollectionHelper.isEmpty( jaxbColumnResultList ) ) {
return NO_COLUMN_RESULTS;
}
final ModelsContext modelBuildingContext = xmlDocumentContext.getModelBuildingContext();
final ColumnResult[] columnResults = new ColumnResult[jaxbColumnResultList.size()];
for ( int i = 0; i < jaxbColumnResultList.size(); i++ ) {
final ColumnResultJpaAnnotation columnResult = JpaAnnotations.COLUMN_RESULT.createUsage( modelBuildingContext );
columnResults[i] = columnResult;
final JaxbColumnResultImpl jaxbColumnResult = jaxbColumnResultList.get( i );
columnResult.name( jaxbColumnResult.getName() );
if ( isNotEmpty( jaxbColumnResult.getClazz() ) ) {
columnResult.type( xmlDocumentContext.resolveJavaType( jaxbColumnResult.getClazz() ).toJavaClass() );
}
}
return columnResults;
}
private final static ConstructorResult[] NO_CONSTRUCTOR_RESULTS = new ConstructorResult[0];
public static ConstructorResult[] extractConstructorResults(
List<JaxbConstructorResultImpl> jaxbConstructorResultList,
XmlDocumentContext xmlDocumentContext) {
if ( CollectionHelper.isEmpty( jaxbConstructorResultList ) ) {
return NO_CONSTRUCTOR_RESULTS;
}
final ModelsContext modelBuildingContext = xmlDocumentContext.getModelBuildingContext();
final ConstructorResult[] constructorResults = new ConstructorResult[jaxbConstructorResultList.size()];
for ( int i = 0; i < jaxbConstructorResultList.size(); i++ ) {
final ConstructorResultJpaAnnotation constructorResult = JpaAnnotations.CONSTRUCTOR_RESULT.createUsage( modelBuildingContext );
constructorResults[i] = constructorResult;
final JaxbConstructorResultImpl jaxbConstructorResult = jaxbConstructorResultList.get( i );
constructorResult.targetClass( xmlDocumentContext.resolveJavaType( jaxbConstructorResult.getTargetClass() ).toJavaClass() );
if ( CollectionHelper.isNotEmpty( jaxbConstructorResult.getColumns() ) ) {
final ColumnResult[] columnResults = extractColumnResults(
jaxbConstructorResult.getColumns(),
xmlDocumentContext
);
if ( columnResults != null ) {
constructorResult.columns( columnResults );
}
}
}
return constructorResults;
}
private static final EntityResult[] NO_ENTITY_RESULTS = new EntityResult[0];
public static EntityResult[] extractEntityResults(
List<JaxbEntityResultImpl> jaxbEntityResults,
XmlDocumentContext xmlDocumentContext) {
if ( CollectionHelper.isEmpty( jaxbEntityResults ) ) {
return NO_ENTITY_RESULTS;
}
final ModelsContext modelBuildingContext = xmlDocumentContext.getModelBuildingContext();
final EntityResult[] entityResults = new EntityResult[jaxbEntityResults.size()];
for ( int i = 0; i < jaxbEntityResults.size(); i++ ) {
final EntityResultJpaAnnotation entityResult = JpaAnnotations.ENTITY_RESULT.createUsage( modelBuildingContext );
entityResults[i] = entityResult;
final JaxbEntityResultImpl jaxbEntityResult = jaxbEntityResults.get( i );
entityResult.entityClass( xmlDocumentContext.resolveJavaType( jaxbEntityResult.getEntityClass() ).toJavaClass() );
if ( StringHelper.isNotEmpty( jaxbEntityResult.getDiscriminatorColumn() ) ) {
entityResult.discriminatorColumn( jaxbEntityResult.getDiscriminatorColumn() );
}
if ( jaxbEntityResult.getLockMode() != null ) {
entityResult.lockMode( jaxbEntityResult.getLockMode() );
}
final FieldResult[] fieldResults = extractFieldResults(
jaxbEntityResult.getFieldResult(),
xmlDocumentContext
);
if ( fieldResults != null ) {
entityResult.fields( fieldResults );
}
}
return entityResults;
}
private static FieldResult[] extractFieldResults(
List<JaxbFieldResultImpl> jaxbFieldResults,
XmlDocumentContext xmlDocumentContext) {
if ( CollectionHelper.isEmpty( jaxbFieldResults ) ) {
return null;
}
final ModelsContext modelBuildingContext = xmlDocumentContext.getModelBuildingContext();
final FieldResult[] fieldResults = new FieldResult[jaxbFieldResults.size()];
for ( int i = 0; i < jaxbFieldResults.size(); i++ ) {
final FieldResultJpaAnnotation fieldResult = JpaAnnotations.FIELD_RESULT.createUsage( modelBuildingContext );
fieldResults[i] = fieldResult;
final JaxbFieldResultImpl jaxbFieldResult = jaxbFieldResults.get( i );
fieldResult.name( jaxbFieldResult.getName() );
fieldResult.column( jaxbFieldResult.getColumn() );
}
return fieldResults;
}
public static void applyNamedProcedureQueries(
JaxbEntityImpl jaxbEntity,
MutableClassDetails classDetails,
XmlDocumentContext xmlDocumentContext) {
final List<JaxbNamedStoredProcedureQueryImpl> jaxbQueries = jaxbEntity.getNamedStoredProcedureQueries();
if ( CollectionHelper.isEmpty( jaxbQueries ) ) {
return;
}
final ModelsContext modelBuildingContext = xmlDocumentContext.getModelBuildingContext();
final NamedStoredProcedureQueriesJpaAnnotation namedQueriesUsage = (NamedStoredProcedureQueriesJpaAnnotation) classDetails.replaceAnnotationUsage(
NAMED_STORED_PROCEDURE_QUERY,
JpaAnnotations.NAMED_STORED_PROCEDURE_QUERIES,
modelBuildingContext
);
final NamedStoredProcedureQuery[] namedQueries = new NamedStoredProcedureQuery[jaxbQueries.size()];
namedQueriesUsage.value( namedQueries );
for ( int i = 0; i < jaxbQueries.size(); i++ ) {
final NamedStoredProcedureQueryJpaAnnotation namedQuery = NAMED_STORED_PROCEDURE_QUERY.createUsage( modelBuildingContext );
namedQueries[i] = namedQuery;
final JaxbNamedStoredProcedureQueryImpl jaxbQuery = jaxbQueries.get( i );
namedQuery.apply( jaxbQuery, xmlDocumentContext );
}
}
private static final StoredProcedureParameter[] NO_PARAMS = new StoredProcedureParameter[0];
public static StoredProcedureParameter[] collectParameters(
List<JaxbStoredProcedureParameterImpl> jaxbParameters,
XmlDocumentContext xmlDocumentContext) {
if ( CollectionHelper.isEmpty( jaxbParameters ) ) {
return NO_PARAMS;
}
final ModelsContext ModelsContext = xmlDocumentContext.getModelBuildingContext();
final StoredProcedureParameter[] result = new StoredProcedureParameter[jaxbParameters.size()];
for ( int i = 0; i < jaxbParameters.size(); i++ ) {
final StoredProcedureParameterJpaAnnotation param = JpaAnnotations.STORED_PROCEDURE_PARAMETER.createUsage( ModelsContext );
result[i] = param;
final JaxbStoredProcedureParameterImpl jaxbParam = jaxbParameters.get( i );
param.apply( jaxbParam, xmlDocumentContext );
}
return result;
}
private static final Class<?>[] NO_CLASSES = new Class[0];
public static Class<?>[] collectResultClasses(List<String> resultClasses, XmlDocumentContext xmlDocumentContext) {
if ( CollectionHelper.isEmpty( resultClasses ) ) {
return NO_CLASSES;
}
final Class<?>[] result = new Class<?>[resultClasses.size()];
for ( int i = 0; i < resultClasses.size(); i++ ) {
result[i] = xmlDocumentContext.resolveJavaType( resultClasses.get( i ) ).toJavaClass();
}
return result;
}
public static String[] collectResultMappings(List<String> resultClasses, XmlDocumentContext xmlDocumentContext) {
if ( CollectionHelper.isEmpty( resultClasses ) ) {
return ArrayHelper.EMPTY_STRING_ARRAY;
}
return resultClasses.toArray( String[]::new );
}
}
| QueryProcessing |
java | apache__camel | components/camel-ai/camel-langchain4j-agent-api/src/main/java/org/apache/camel/component/langchain4j/agent/api/Agent.java | {
"start": 1191,
"end": 1312
} | interface ____ abstracts different types of AI agents within the Apache Camel LangChain4j integration.
*
* <p>
* This | that |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converter/MoneyConverterTest.java | {
"start": 2492,
"end": 2884
} | class ____
implements AttributeConverter<Money, Long> {
@Override
public Long convertToDatabaseColumn(Money attribute) {
return attribute == null ? null : attribute.getCents();
}
@Override
public Money convertToEntityAttribute(Long dbData) {
return dbData == null ? null : new Money(dbData);
}
}
//end::basic-jpa-convert-money-converter-mapping-example[]
}
| MoneyConverter |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/otherentityentrycontext/OtherEntityEntryContextTest.java | {
"start": 1641,
"end": 1789
} | class ____ {
@Id
Long id;
String name;
Parent() {
}
Parent(Long id, String name) {
this.id = id;
this.name = name;
}
}
}
| Parent |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/transport/GenerateTransportVersionDefinitionTask.java | {
"start": 1913,
"end": 16266
} | class ____ extends DefaultTask {
/**
* Files that contain the references to the transport version names.
*/
@InputFiles
@PathSensitive(PathSensitivity.RELATIVE)
public abstract ConfigurableFileCollection getReferencesFiles();
@ServiceReference("transportVersionResources")
abstract Property<TransportVersionResourcesService> getResourceService();
@Input
@Optional
@Option(option = "name", description = "The name of the Transport Version definition, e.g. --name=my_new_tv")
public abstract Property<String> getDefinitionName();
@Input
@Optional
@Option(
option = "backport-branches",
description = "The branches this definition will be backported to, e.g. --backport-branches=9.1,8.19"
)
public abstract Property<String> getBackportBranches();
@Input
@Optional
@Option(option = "increment", description = "The amount to increment the id from the current upper bounds file by")
public abstract Property<Integer> getIncrement();
@Input
@Optional
abstract Property<Boolean> getResolveConflict();
/**
* The name of the upper bounds file which will be used at runtime on the current branch. Normally
* this equates to VersionProperties.getElasticsearchVersion().
*/
@Input
public abstract Property<String> getCurrentUpperBoundName();
/**
* An additional upper bound file that will be consulted when generating a transport version.
* The larger of this and the current upper bound will be used to create the new primary id.
*/
@InputFile
@Optional
public abstract RegularFileProperty getAlternateUpperBoundFile();
@TaskAction
public void run() throws IOException {
TransportVersionResourcesService resources = getResourceService().get();
List<TransportVersionUpperBound> upstreamUpperBounds = resources.getUpperBoundsFromGitBase();
boolean onReleaseBranch = resources.checkIfDefinitelyOnReleaseBranch(upstreamUpperBounds, getCurrentUpperBoundName().get());
if (onReleaseBranch) {
throw new IllegalArgumentException("Transport version generation cannot run on release branches");
}
Set<String> referencedNames = TransportVersionReference.collectNames(getReferencesFiles());
Set<String> changedDefinitionNames = resources.getChangedReferableDefinitionNames();
String targetDefinitionName = getTargetDefinitionName(resources, referencedNames, changedDefinitionNames);
// First check for any unused definitions. This later generation to not get confused by a definition that can't be used.
removeUnusedNamedDefinitions(resources, referencedNames, changedDefinitionNames);
Map<Integer, List<IdAndDefinition>> idsByBase = resources.getIdsByBase();
if (targetDefinitionName.isEmpty()) {
getLogger().lifecycle("No transport version name detected, resetting upper bounds");
resetAllUpperBounds(resources, idsByBase);
} else {
getLogger().lifecycle("Generating transport version name: " + targetDefinitionName);
Set<String> targetUpperBoundNames = getTargetUpperBoundNames(resources, upstreamUpperBounds, targetDefinitionName);
List<TransportVersionId> ids = updateUpperBounds(
resources,
upstreamUpperBounds,
targetUpperBoundNames,
idsByBase,
targetDefinitionName
);
// (Re)write the definition file.
resources.writeDefinition(new TransportVersionDefinition(targetDefinitionName, ids, true));
}
}
private List<TransportVersionId> updateUpperBounds(
TransportVersionResourcesService resources,
List<TransportVersionUpperBound> existingUpperBounds,
Set<String> targetUpperBoundNames,
Map<Integer, List<IdAndDefinition>> idsByBase,
String definitionName
) throws IOException {
String currentUpperBoundName = getCurrentUpperBoundName().get();
int increment = getIncrement().get();
if (increment <= 0) {
throw new IllegalArgumentException("Invalid increment " + increment + ", must be a positive integer");
}
if (increment > 1000) {
throw new IllegalArgumentException("Invalid increment " + increment + ", must be no larger than 1000");
}
List<TransportVersionId> ids = new ArrayList<>();
boolean stageInGit = getResolveConflict().getOrElse(false);
TransportVersionDefinition existingDefinition = resources.getReferableDefinitionFromGitBase(definitionName);
for (TransportVersionUpperBound existingUpperBound : existingUpperBounds) {
String upperBoundName = existingUpperBound.name();
if (targetUpperBoundNames.contains(upperBoundName)) {
// Case: targeting this upper bound, find an existing id if it exists
TransportVersionId targetId = maybeGetExistingId(existingUpperBound, existingDefinition, definitionName);
if (targetId == null) {
// Case: an id doesn't yet exist for this upper bound, so create one
int targetIncrement = upperBoundName.equals(currentUpperBoundName) ? increment : 1;
targetId = createTargetId(existingUpperBound, targetIncrement);
var newUpperBound = new TransportVersionUpperBound(upperBoundName, definitionName, targetId);
resources.writeUpperBound(newUpperBound, stageInGit);
}
ids.add(targetId);
} else if (resources.getChangedUpperBoundNames().contains(upperBoundName)) {
// Default case: we're not targeting this branch so reset it
resetUpperBound(resources, existingUpperBound, idsByBase, definitionName);
}
}
Collections.sort(ids);
return ids;
}
private String getTargetDefinitionName(
TransportVersionResourcesService resources,
Set<String> referencedNames,
Set<String> changedDefinitions
) {
if (getDefinitionName().isPresent()) {
// an explicit name was passed in, so use it
return getDefinitionName().get();
}
// First check for unreferenced names. We only care about the first one. If there is more than one
// validation will fail later and the developer will have to remove one. When that happens, generation
// will re-run and we will fixup the state to use whatever new name remains.
for (String referencedName : referencedNames) {
if (resources.referableDefinitionExists(referencedName) == false) {
return referencedName;
}
}
// Since we didn't find any missing names, we use the first changed name. If there is more than
// one changed name, validation will fail later, just as above.
if (changedDefinitions.isEmpty()) {
return "";
} else {
String changedDefinitionName = changedDefinitions.iterator().next();
if (referencedNames.contains(changedDefinitionName)) {
return changedDefinitionName;
} else {
return ""; // the changed name is unreferenced, so go into "reset mode"
}
}
}
private Set<String> getTargetUpperBoundNames(
TransportVersionResourcesService resources,
List<TransportVersionUpperBound> upstreamUpperBounds,
String targetDefinitionName
) throws IOException {
if (getResolveConflict().getOrElse(false)) {
if (getBackportBranches().isPresent()) {
throw new IllegalArgumentException("Cannot use --resolve-conflict with --backport-branches");
}
return getUpperBoundNamesFromDefinition(resources, upstreamUpperBounds, targetDefinitionName);
}
Set<String> targetUpperBoundNames = new HashSet<>();
targetUpperBoundNames.add(getCurrentUpperBoundName().get());
if (getBackportBranches().isPresent()) {
targetUpperBoundNames.addAll(List.of(getBackportBranches().get().split(",")));
}
Set<String> missingBranches = new HashSet<>(targetUpperBoundNames);
List<String> knownUpperBoundNames = new ArrayList<>();
for (TransportVersionUpperBound upperBound : upstreamUpperBounds) {
knownUpperBoundNames.add(upperBound.name());
missingBranches.remove(upperBound.name());
}
if (missingBranches.isEmpty() == false) {
List<String> sortedMissing = missingBranches.stream().sorted().toList();
List<String> sortedKnown = knownUpperBoundNames.stream().sorted().toList();
throw new IllegalArgumentException(
"Missing upper bounds files for branches " + sortedMissing + ", known branches are " + sortedKnown
);
}
return targetUpperBoundNames;
}
private Set<String> getUpperBoundNamesFromDefinition(
TransportVersionResourcesService resources,
List<TransportVersionUpperBound> upstreamUpperBounds,
String targetDefinitionName
) throws IOException {
TransportVersionDefinition definition = resources.getReferableDefinition(targetDefinitionName);
Set<String> upperBoundNames = new HashSet<>();
upperBoundNames.add(getCurrentUpperBoundName().get());
// skip the primary id as that is current, which we always add
for (int i = 1; i < definition.ids().size(); ++i) {
TransportVersionId id = definition.ids().get(i);
// we have a small number of upper bound files, so just scan for the ones we want
for (TransportVersionUpperBound upperBound : upstreamUpperBounds) {
if (upperBound.definitionId().base() == id.base()) {
upperBoundNames.add(upperBound.name());
}
}
}
return upperBoundNames;
}
private void resetAllUpperBounds(TransportVersionResourcesService resources, Map<Integer, List<IdAndDefinition>> idsByBase)
throws IOException {
for (String upperBoundName : resources.getChangedUpperBoundNames()) {
TransportVersionUpperBound upstreamUpperBound = resources.getUpperBoundFromGitBase(upperBoundName);
resetUpperBound(resources, upstreamUpperBound, idsByBase, null);
}
}
private void resetUpperBound(
TransportVersionResourcesService resources,
TransportVersionUpperBound upperBound,
Map<Integer, List<IdAndDefinition>> idsByBase,
String ignoreDefinitionName
) throws IOException {
List<IdAndDefinition> idsForUpperBound = idsByBase.get(upperBound.definitionId().base());
if (idsForUpperBound == null) {
throw new RuntimeException("Could not find base id: " + upperBound.definitionId().base());
}
IdAndDefinition resetValue = idsForUpperBound.getLast();
if (resetValue.definition().name().equals(ignoreDefinitionName)) {
// there must be another definition in this base since the ignored definition is new
assert idsForUpperBound.size() >= 2;
resetValue = idsForUpperBound.get(idsForUpperBound.size() - 2);
}
var resetUpperBound = new TransportVersionUpperBound(upperBound.name(), resetValue.definition().name(), resetValue.id());
resources.writeUpperBound(resetUpperBound, false);
}
private void removeUnusedNamedDefinitions(
TransportVersionResourcesService resources,
Set<String> referencedNames,
Set<String> changedDefinitions
) throws IOException {
for (String definitionName : changedDefinitions) {
if (referencedNames.contains(definitionName) == false) {
// we added this definition file, but it's now unreferenced, so delete it
getLogger().lifecycle("Deleting unreferenced named transport version definition [" + definitionName + "]");
resources.deleteReferableDefinition(definitionName);
}
}
}
private TransportVersionId maybeGetExistingId(
TransportVersionUpperBound upperBound,
TransportVersionDefinition existingDefinition,
String name
) {
if (existingDefinition == null) {
// the name doesn't yet exist, so there is no id to return
return null;
}
if (upperBound.definitionName().equals(name)) {
// the name exists and this upper bound already points at it
return upperBound.definitionId();
}
if (upperBound.name().equals(getCurrentUpperBoundName().get())) {
// this is the upper bound of the current branch, so use the primary id
return existingDefinition.ids().getFirst();
}
// the upper bound is for a non-current branch, so find the id with the same base
for (TransportVersionId id : existingDefinition.ids()) {
if (id.base() == upperBound.definitionId().base()) {
return id;
}
}
return null; // no existing id for this upper bound
}
private TransportVersionId createTargetId(TransportVersionUpperBound existingUpperBound, int increment) throws IOException {
int currentId = existingUpperBound.definitionId().complete();
// allow for an alternate upper bound file to be consulted. This supports Serverless basing its
// own transport version ids on the greater of server or serverless
if (getAlternateUpperBoundFile().isPresent()) {
Path altUpperBoundPath = getAlternateUpperBoundFile().get().getAsFile().toPath();
String contents = Files.readString(altUpperBoundPath, StandardCharsets.UTF_8);
var altUpperBound = TransportVersionUpperBound.fromString(altUpperBoundPath, contents);
if (altUpperBound.definitionId().complete() > currentId) {
currentId = altUpperBound.definitionId().complete();
}
}
return TransportVersionId.fromInt(currentId + increment);
}
}
| GenerateTransportVersionDefinitionTask |
java | google__dagger | javatests/dagger/functional/factory/SubcomponentFactoryTest.java | {
"start": 2206,
"end": 2527
} | class ____ {
private final Sub.Factory subFactory;
@Inject
UsesSubcomponentFactory(Sub.Factory subFactory) {
this.subFactory = subFactory;
}
Sub getSubcomponent(String s) {
return subFactory.create(s);
}
}
@Component(modules = ModuleWithSubcomponent.class)
| UsesSubcomponentFactory |
java | grpc__grpc-java | okhttp/third_party/okhttp/main/java/io/grpc/okhttp/internal/Platform.java | {
"start": 18372,
"end": 18821
} | class ____?");
return null;
}
return provider.unsupported ? null : provider.selected;
} catch (InvocationTargetException e) {
throw new AssertionError();
} catch (IllegalAccessException e) {
throw new AssertionError();
}
}
}
/**
* Handle the methods of ALPN's ClientProvider and ServerProvider
* without a compile-time dependency on those interfaces.
*/
private static | path |
java | apache__hadoop | hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestCreate.java | {
"start": 1172,
"end": 1441
} | class ____ extends AbstractContractCreateTest {
@BeforeAll
public static void before() {
assumeTrue(TestEnv.checkTestEnabled());
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new TosContract(conf);
}
}
| TestCreate |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/util/LeaderRetrievalUtils.java | {
"start": 4547,
"end": 5735
} | class ____ implements LeaderRetrievalListener {
private final CompletableFuture<LeaderInformation> connectionInfoFuture =
new CompletableFuture<>();
public CompletableFuture<LeaderInformation> getLeaderInformationFuture() {
return connectionInfoFuture;
}
@Override
public void notifyLeaderAddress(String leaderAddress, UUID leaderSessionID) {
if (leaderAddress != null
&& !leaderAddress.equals("")
&& !connectionInfoFuture.isDone()) {
final LeaderInformation leaderInformation =
LeaderInformation.known(leaderSessionID, leaderAddress);
connectionInfoFuture.complete(leaderInformation);
}
}
@Override
public void handleError(Exception exception) {
connectionInfoFuture.completeExceptionally(exception);
}
}
// ------------------------------------------------------------------------
/** Private constructor to prevent instantiation. */
private LeaderRetrievalUtils() {
throw new RuntimeException();
}
}
| LeaderInformationListener |
java | apache__camel | dsl/camel-xml-io-dsl/src/test/java/org/apache/camel/dsl/xml/io/beans/MyCtrBean.java | {
"start": 854,
"end": 1448
} | class ____ {
private String field1;
private String field2;
private int age;
public MyCtrBean(String field1, String field2) {
this.field1 = field1;
this.field2 = field2;
}
public String getField1() {
return field1;
}
public String getField2() {
return field2;
}
public int getAge() {
return age;
}
public void setAge(int age) {
this.age = age;
}
public String hello(String body) {
return field1 + " " + body + ". I am " + field2 + " and " + age + " years old!";
}
}
| MyCtrBean |
java | netty__netty | handler/src/test/java/io/netty/handler/ssl/SSLEngineTest.java | {
"start": 12166,
"end": 159058
} | class ____ implements ByteBufAllocator {
private final ByteBufAllocator allocator;
private final BufferType type;
TestByteBufAllocator(ByteBufAllocator allocator, BufferType type) {
this.allocator = allocator;
this.type = type;
}
@Override
public ByteBuf buffer() {
switch (type) {
case Direct:
return allocator.directBuffer();
case Heap:
return allocator.heapBuffer();
case Mixed:
return ThreadLocalRandom.current().nextBoolean() ?
allocator.directBuffer() : allocator.heapBuffer();
default:
throw new Error("Unexpected buffer type: " + type);
}
}
@Override
public ByteBuf buffer(int initialCapacity) {
switch (type) {
case Direct:
return allocator.directBuffer(initialCapacity);
case Heap:
return allocator.heapBuffer(initialCapacity);
case Mixed:
return ThreadLocalRandom.current().nextBoolean() ?
allocator.directBuffer(initialCapacity) : allocator.heapBuffer(initialCapacity);
default:
throw new Error("Unexpected buffer type: " + type);
}
}
@Override
public ByteBuf buffer(int initialCapacity, int maxCapacity) {
switch (type) {
case Direct:
return allocator.directBuffer(initialCapacity, maxCapacity);
case Heap:
return allocator.heapBuffer(initialCapacity, maxCapacity);
case Mixed:
return ThreadLocalRandom.current().nextBoolean() ?
allocator.directBuffer(initialCapacity, maxCapacity) :
allocator.heapBuffer(initialCapacity, maxCapacity);
default:
throw new Error("Unexpected buffer type: " + type);
}
}
@Override
public ByteBuf ioBuffer() {
return allocator.ioBuffer();
}
@Override
public ByteBuf ioBuffer(int initialCapacity) {
return allocator.ioBuffer(initialCapacity);
}
@Override
public ByteBuf ioBuffer(int initialCapacity, int maxCapacity) {
return allocator.ioBuffer(initialCapacity, maxCapacity);
}
@Override
public ByteBuf heapBuffer() {
return allocator.heapBuffer();
}
@Override
public ByteBuf heapBuffer(int initialCapacity) {
return allocator.heapBuffer(initialCapacity);
}
@Override
public ByteBuf heapBuffer(int initialCapacity, int maxCapacity) {
return allocator.heapBuffer(initialCapacity, maxCapacity);
}
@Override
public ByteBuf directBuffer() {
return allocator.directBuffer();
}
@Override
public ByteBuf directBuffer(int initialCapacity) {
return allocator.directBuffer(initialCapacity);
}
@Override
public ByteBuf directBuffer(int initialCapacity, int maxCapacity) {
return allocator.directBuffer(initialCapacity, maxCapacity);
}
@Override
public CompositeByteBuf compositeBuffer() {
switch (type) {
case Direct:
return allocator.compositeDirectBuffer();
case Heap:
return allocator.compositeHeapBuffer();
case Mixed:
return ThreadLocalRandom.current().nextBoolean() ?
allocator.compositeDirectBuffer() :
allocator.compositeHeapBuffer();
default:
throw new Error("Unexpected buffer type: " + type);
}
}
@Override
public CompositeByteBuf compositeBuffer(int maxNumComponents) {
switch (type) {
case Direct:
return allocator.compositeDirectBuffer(maxNumComponents);
case Heap:
return allocator.compositeHeapBuffer(maxNumComponents);
case Mixed:
return ThreadLocalRandom.current().nextBoolean() ?
allocator.compositeDirectBuffer(maxNumComponents) :
allocator.compositeHeapBuffer(maxNumComponents);
default:
throw new Error("Unexpected buffer type: " + type);
}
}
@Override
public CompositeByteBuf compositeHeapBuffer() {
return allocator.compositeHeapBuffer();
}
@Override
public CompositeByteBuf compositeHeapBuffer(int maxNumComponents) {
return allocator.compositeHeapBuffer(maxNumComponents);
}
@Override
public CompositeByteBuf compositeDirectBuffer() {
return allocator.compositeDirectBuffer();
}
@Override
public CompositeByteBuf compositeDirectBuffer(int maxNumComponents) {
return allocator.compositeDirectBuffer(maxNumComponents);
}
@Override
public boolean isDirectBufferPooled() {
return allocator.isDirectBufferPooled();
}
@Override
public int calculateNewCapacity(int minNewCapacity, int maxCapacity) {
return allocator.calculateNewCapacity(minNewCapacity, maxCapacity);
}
}
@BeforeEach
public void setup() {
serverLatch = new CountDownLatch(1);
clientLatch = new CountDownLatch(1);
delegatingExecutor = new DelayingExecutor();
serverReceiver = new MessageReceiver();
clientReceiver = new MessageReceiver();
}
@AfterEach
public void tearDown() throws InterruptedException {
ChannelFuture clientCloseFuture = null;
ChannelFuture serverConnectedCloseFuture = null;
ChannelFuture serverCloseFuture = null;
if (clientChannel != null) {
clientCloseFuture = clientChannel.close();
clientChannel = null;
}
if (serverConnectedChannel != null) {
serverConnectedCloseFuture = serverConnectedChannel.close();
serverConnectedChannel = null;
}
if (serverChannel != null) {
serverCloseFuture = serverChannel.close();
serverChannel = null;
}
// We must wait for the Channel cleanup to finish. In the case if the ReferenceCountedOpenSslEngineTest
// the ReferenceCountedOpenSslEngine depends upon the SslContext and so we must wait the cleanup the
// SslContext to avoid JVM core dumps!
//
// See https://github.com/netty/netty/issues/5692
if (clientCloseFuture != null) {
clientCloseFuture.sync();
}
if (serverConnectedCloseFuture != null) {
serverConnectedCloseFuture.sync();
}
if (serverCloseFuture != null) {
serverCloseFuture.sync();
}
if (serverSslCtx != null) {
cleanupServerSslContext(serverSslCtx);
serverSslCtx = null;
}
if (clientSslCtx != null) {
cleanupClientSslContext(clientSslCtx);
clientSslCtx = null;
}
Future<?> serverGroupShutdownFuture = null;
Future<?> serverChildGroupShutdownFuture = null;
Future<?> clientGroupShutdownFuture = null;
if (sb != null) {
serverGroupShutdownFuture = sb.config().group().shutdownGracefully(0, 0, TimeUnit.MILLISECONDS);
serverChildGroupShutdownFuture = sb.config().childGroup().shutdownGracefully(0, 0, TimeUnit.MILLISECONDS);
}
if (cb != null) {
clientGroupShutdownFuture = cb.config().group().shutdownGracefully(0, 0, TimeUnit.MILLISECONDS);
}
if (serverGroupShutdownFuture != null) {
serverGroupShutdownFuture.sync();
serverChildGroupShutdownFuture.sync();
}
if (clientGroupShutdownFuture != null) {
clientGroupShutdownFuture.sync();
}
delegatingExecutor.shutdown();
serverException = null;
clientException = null;
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testMutualAuthSameCerts(SSLEngineTestParam param) throws Throwable {
mySetupMutualAuth(param, ResourcesUtil.getFile(getClass(), "test_unencrypted.pem"),
ResourcesUtil.getFile(getClass(), "test.crt"),
null);
runTest(null);
assertTrue(serverLatch.await(2, TimeUnit.SECONDS));
Throwable cause = serverException;
if (cause != null) {
throw cause;
}
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testSetSupportedCiphers(SSLEngineTestParam param) throws Exception {
if (param.protocolCipherCombo != ProtocolCipherCombo.tlsv12()) {
return;
}
SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
serverSslCtx = wrapContext(param, SslContextBuilder.forServer(cert.key(), cert.cert())
.protocols(param.protocols())
.ciphers(param.ciphers())
.sslProvider(sslServerProvider()).build());
final SSLEngine serverEngine =
wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
.trustManager(cert.certificate())
.protocols(param.protocols())
.ciphers(param.ciphers())
.sslProvider(sslClientProvider()).build());
final SSLEngine clientEngine =
wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
final String[] enabledCiphers = new String[]{ param.ciphers().get(0) };
try {
clientEngine.setEnabledCipherSuites(enabledCiphers);
serverEngine.setEnabledCipherSuites(enabledCiphers);
assertArrayEquals(enabledCiphers, clientEngine.getEnabledCipherSuites());
assertArrayEquals(enabledCiphers, serverEngine.getEnabledCipherSuites());
} finally {
cleanupClientSslEngine(clientEngine);
cleanupServerSslEngine(serverEngine);
}
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testIncompatibleCiphers(final SSLEngineTestParam param) throws Exception {
assumeTrue(SslProvider.isTlsv13Supported(sslClientProvider()));
assumeTrue(SslProvider.isTlsv13Supported(sslServerProvider()));
SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
// Select a mandatory cipher from the TLSv1.2 RFC https://www.ietf.org/rfc/rfc5246.txt so handshakes won't fail
// due to no shared/supported cipher.
clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.protocols(SslProtocols.TLS_v1_3, SslProtocols.TLS_v1_2, SslProtocols.TLS_v1)
.sslContextProvider(clientSslContextProvider())
.sslProvider(sslClientProvider())
.build());
serverSslCtx = wrapContext(param, SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey())
.protocols(SslProtocols.TLS_v1_3, SslProtocols.TLS_v1_2, SslProtocols.TLS_v1)
.sslContextProvider(serverSslContextProvider())
.sslProvider(sslServerProvider())
.build());
SSLEngine clientEngine = null;
SSLEngine serverEngine = null;
try {
clientEngine = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
serverEngine = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
// Set the server to only support a single TLSv1.2 cipher
final String serverCipher =
// JDK24+ does not support TLS_RSA_* ciphers by default anymore:
// See https://www.java.com/en/configure_crypto.html
PlatformDependent.javaVersion() >= 24 ? "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" :
"TLS_RSA_WITH_AES_128_CBC_SHA";
serverEngine.setEnabledCipherSuites(new String[] { serverCipher });
// Set the client to only support a single TLSv1.3 cipher
final String clientCipher = "TLS_AES_256_GCM_SHA384";
clientEngine.setEnabledCipherSuites(new String[] { clientCipher });
final SSLEngine client = clientEngine;
final SSLEngine server = serverEngine;
assertThrows(SSLHandshakeException.class, new Executable() {
@Override
public void execute() throws Throwable {
handshake(param.type(), param.delegate(), client, server);
}
});
} finally {
cleanupClientSslEngine(clientEngine);
cleanupServerSslEngine(serverEngine);
}
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testMutualAuthDiffCerts(SSLEngineTestParam param) throws Exception {
File serverKeyFile = ResourcesUtil.getFile(getClass(), "test_encrypted.pem");
File serverCrtFile = ResourcesUtil.getFile(getClass(), "test.crt");
String serverKeyPassword = "12345";
File clientKeyFile = ResourcesUtil.getFile(getClass(), "test2_encrypted.pem");
File clientCrtFile = ResourcesUtil.getFile(getClass(), "test2.crt");
String clientKeyPassword = "12345";
mySetupMutualAuth(param, clientCrtFile, serverKeyFile, serverCrtFile, serverKeyPassword,
serverCrtFile, clientKeyFile, clientCrtFile, clientKeyPassword);
runTest(null);
assertTrue(serverLatch.await(2, TimeUnit.SECONDS));
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testMutualAuthDiffCertsServerFailure(SSLEngineTestParam param) throws Exception {
File serverKeyFile = ResourcesUtil.getFile(getClass(), "test_encrypted.pem");
File serverCrtFile = ResourcesUtil.getFile(getClass(), "test.crt");
String serverKeyPassword = "12345";
File clientKeyFile = ResourcesUtil.getFile(getClass(), "test2_encrypted.pem");
File clientCrtFile = ResourcesUtil.getFile(getClass(), "test2.crt");
String clientKeyPassword = "12345";
// Client trusts server but server only trusts itself
mySetupMutualAuth(param, serverCrtFile, serverKeyFile, serverCrtFile, serverKeyPassword,
serverCrtFile, clientKeyFile, clientCrtFile, clientKeyPassword);
assertTrue(serverLatch.await(10, TimeUnit.SECONDS));
assertTrue(serverException instanceof SSLHandshakeException);
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testMutualAuthDiffCertsClientFailure(SSLEngineTestParam param) throws Exception {
File serverKeyFile = ResourcesUtil.getFile(getClass(), "test_unencrypted.pem");
File serverCrtFile = ResourcesUtil.getFile(getClass(), "test.crt");
String serverKeyPassword = null;
File clientKeyFile = ResourcesUtil.getFile(getClass(), "test2_unencrypted.pem");
File clientCrtFile = ResourcesUtil.getFile(getClass(), "test2.crt");
String clientKeyPassword = null;
// Server trusts client but client only trusts itself
mySetupMutualAuth(param, clientCrtFile, serverKeyFile, serverCrtFile, serverKeyPassword,
clientCrtFile, clientKeyFile, clientCrtFile, clientKeyPassword);
assertTrue(clientLatch.await(10, TimeUnit.SECONDS));
assertTrue(clientException instanceof SSLHandshakeException);
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testMutualAuthInvalidIntermediateCASucceedWithOptionalClientAuth(SSLEngineTestParam param)
throws Exception {
testMutualAuthInvalidClientCertSucceed(param, ClientAuth.NONE);
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testMutualAuthInvalidIntermediateCAFailWithOptionalClientAuth(SSLEngineTestParam param)
throws Exception {
testMutualAuthClientCertFail(param, ClientAuth.OPTIONAL);
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testMutualAuthInvalidIntermediateCAFailWithRequiredClientAuth(SSLEngineTestParam param)
throws Exception {
testMutualAuthClientCertFail(param, ClientAuth.REQUIRE);
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testMutualAuthValidClientCertChainTooLongFailOptionalClientAuth(SSLEngineTestParam param)
throws Exception {
testMutualAuthClientCertFail(param, ClientAuth.OPTIONAL, "mutual_auth_client.p12", true);
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testMutualAuthValidClientCertChainTooLongFailRequireClientAuth(SSLEngineTestParam param)
throws Exception {
testMutualAuthClientCertFail(param, ClientAuth.REQUIRE, "mutual_auth_client.p12", true);
}
private void testMutualAuthInvalidClientCertSucceed(SSLEngineTestParam param, ClientAuth auth) throws Exception {
char[] password = "example".toCharArray();
final KeyStore serverKeyStore = KeyStore.getInstance("PKCS12");
try (InputStream resourceAsStream = getClass().getResourceAsStream("mutual_auth_server.p12")) {
serverKeyStore.load(resourceAsStream, password);
}
final KeyStore clientKeyStore = KeyStore.getInstance("PKCS12");
try (InputStream resourceAsStream = getClass().getResourceAsStream("mutual_auth_invalid_client.p12")) {
clientKeyStore.load(resourceAsStream, password);
}
final KeyManagerFactory serverKeyManagerFactory =
KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
serverKeyManagerFactory.init(serverKeyStore, password);
final KeyManagerFactory clientKeyManagerFactory =
KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
clientKeyManagerFactory.init(clientKeyStore, password);
File commonCertChain = ResourcesUtil.getFile(getClass(), "mutual_auth_ca.pem");
mySetupMutualAuth(param, serverKeyManagerFactory, commonCertChain, clientKeyManagerFactory, commonCertChain,
auth, false, false);
assertTrue(clientLatch.await(10, TimeUnit.SECONDS));
rethrowIfNotNull(clientException);
assertTrue(serverLatch.await(5, TimeUnit.SECONDS));
rethrowIfNotNull(serverException);
}
private void testMutualAuthClientCertFail(SSLEngineTestParam param, ClientAuth auth) throws Exception {
testMutualAuthClientCertFail(param, auth, "mutual_auth_invalid_client.p12", false);
}
private void testMutualAuthClientCertFail(SSLEngineTestParam param, ClientAuth auth, String clientCert,
boolean serverInitEngine)
throws Exception {
char[] password = "example".toCharArray();
final KeyStore serverKeyStore = KeyStore.getInstance("PKCS12");
try (InputStream resourceAsStream = getClass().getResourceAsStream("mutual_auth_server.p12")) {
serverKeyStore.load(resourceAsStream, password);
}
final KeyStore clientKeyStore = KeyStore.getInstance("PKCS12");
try (InputStream resourceAsStream = getClass().getResourceAsStream(clientCert)) {
clientKeyStore.load(resourceAsStream, password);
}
final KeyManagerFactory serverKeyManagerFactory =
KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
serverKeyManagerFactory.init(serverKeyStore, password);
final KeyManagerFactory clientKeyManagerFactory =
KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
clientKeyManagerFactory.init(clientKeyStore, password);
File commonCertChain = ResourcesUtil.getFile(getClass(), "mutual_auth_ca.pem");
mySetupMutualAuth(param, serverKeyManagerFactory, commonCertChain, clientKeyManagerFactory, commonCertChain,
auth, true, serverInitEngine);
assertTrue(clientLatch.await(10, TimeUnit.SECONDS));
assertTrue(mySetupMutualAuthServerIsValidClientException(clientException),
"unexpected exception: " + clientException);
assertTrue(serverLatch.await(5, TimeUnit.SECONDS));
assertTrue(mySetupMutualAuthServerIsValidServerException(serverException),
"unexpected exception: " + serverException);
}
protected static boolean causedBySSLException(Throwable cause) {
Throwable next = cause;
do {
if (next instanceof SSLException) {
return true;
}
next = next.getCause();
} while (next != null);
return false;
}
    // Server-side failure filter; by default identical to the shared check. Subclasses
    // (provider-specific tests) may override to accept additional causes.
    protected boolean mySetupMutualAuthServerIsValidServerException(Throwable cause) {
        return mySetupMutualAuthServerIsValidException(cause);
    }
    // Client-side failure filter; by default identical to the shared check. Subclasses
    // (provider-specific tests) may override to accept additional causes.
    protected boolean mySetupMutualAuthServerIsValidClientException(Throwable cause) {
        return mySetupMutualAuthServerIsValidException(cause);
    }
protected boolean mySetupMutualAuthServerIsValidException(Throwable cause) {
// As in TLSv1.3 the handshake is sent without an extra roundtrip an SSLException is valid as well.
return cause instanceof SSLException || cause instanceof ClosedChannelException;
}
    // No-op hook: invoked on the server-side SslHandler before the handshake when
    // testMutualAuthClientCertFail(..) is called with serverInitEngine=true. Subclasses override.
    protected void mySetupMutualAuthServerInitSslHandler(SslHandler handler) {
    }
    /**
     * Bootstraps an NIO server/client pair configured for mutually-authenticated TLS and connects
     * them. Handshake outcomes are recorded in {@code serverException}/{@code clientException} and
     * signalled via {@code serverLatch}/{@code clientLatch}.
     *
     * @param failureExpected  when {@code true}, a successfully completed server handshake is
     *                         itself recorded as an error, and the client only counts down its
     *                         latch on failure.
     * @param serverInitEngine when {@code true}, the server-side handler is first passed to
     *                         {@link #mySetupMutualAuthServerInitSslHandler(SslHandler)}.
     */
    protected void mySetupMutualAuth(final SSLEngineTestParam param, KeyManagerFactory serverKMF,
                                     final File serverTrustManager,
                                     KeyManagerFactory clientKMF, File clientTrustManager,
                                     ClientAuth clientAuth, final boolean failureExpected,
                                     final boolean serverInitEngine)
            throws SSLException, InterruptedException {
        // Session cache disabled (size/timeout 0) so every run performs a full handshake.
        serverSslCtx = wrapContext(param, SslContextBuilder.forServer(serverKMF)
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .sslProvider(sslServerProvider())
                .sslContextProvider(serverSslContextProvider())
                .trustManager(serverTrustManager)
                .clientAuth(clientAuth)
                .ciphers(null, IdentityCipherSuiteFilter.INSTANCE)
                .sessionCacheSize(0)
                .sessionTimeout(0).build());
        clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .sslProvider(sslClientProvider())
                .sslContextProvider(clientSslContextProvider())
                .trustManager(clientTrustManager)
                .keyManager(clientKMF)
                .ciphers(null, IdentityCipherSuiteFilter.INSTANCE)
                .sessionCacheSize(0)
                .endpointIdentificationAlgorithm(null)
                .sessionTimeout(0).build());
        serverConnectedChannel = null;
        sb = new ServerBootstrap();
        cb = new Bootstrap();
        sb.group(new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory()));
        sb.channel(NioServerSocketChannel.class);
        sb.childHandler(new ChannelInitializer<Channel>() {
            @Override
            protected void initChannel(Channel ch) throws Exception {
                ch.config().setAllocator(new TestByteBufAllocator(ch.config().getAllocator(), param.type()));
                ChannelPipeline p = ch.pipeline();
                SslHandler handler = !param.delegate ? serverSslCtx.newHandler(ch.alloc()) :
                        serverSslCtx.newHandler(ch.alloc(), delegatingExecutor);
                if (serverInitEngine) {
                    mySetupMutualAuthServerInitSslHandler(handler);
                }
                p.addLast(handler);
                p.addLast(new MessageDelegatorChannelHandler(serverReceiver, serverLatch));
                p.addLast(new ChannelInboundHandlerAdapter() {
                    @Override
                    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
                        if (evt == SslHandshakeCompletionEvent.SUCCESS) {
                            if (failureExpected) {
                                serverException = new IllegalStateException("handshake complete. expected failure");
                            }
                            serverLatch.countDown();
                        } else if (evt instanceof SslHandshakeCompletionEvent) {
                            serverException = ((SslHandshakeCompletionEvent) evt).cause();
                            serverLatch.countDown();
                        }
                        ctx.fireUserEventTriggered(evt);
                    }
                    @Override
                    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
                        if (cause.getCause() instanceof SSLHandshakeException) {
                            serverException = cause.getCause();
                            serverLatch.countDown();
                        } else {
                            serverException = cause;
                            ctx.fireExceptionCaught(cause);
                        }
                    }
                });
                serverConnectedChannel = ch;
            }
        });
        cb.group(new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory()));
        cb.channel(NioSocketChannel.class);
        cb.handler(new ChannelInitializer<Channel>() {
            @Override
            protected void initChannel(Channel ch) throws Exception {
                ch.config().setAllocator(new TestByteBufAllocator(ch.config().getAllocator(), param.type));
                ChannelPipeline p = ch.pipeline();
                SslHandler handler = !param.delegate ? clientSslCtx.newHandler(ch.alloc()) :
                        clientSslCtx.newHandler(ch.alloc(), delegatingExecutor);
                p.addLast(handler);
                p.addLast(new MessageDelegatorChannelHandler(clientReceiver, clientLatch));
                p.addLast(new ChannelInboundHandlerAdapter() {
                    @Override
                    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
                        if (evt == SslHandshakeCompletionEvent.SUCCESS) {
                            // With TLS1.3 a mutual auth error will not be propagated as a handshake error most of the
                            // time as the handshake needs NO extra roundtrip.
                            if (!failureExpected) {
                                clientLatch.countDown();
                            }
                        } else if (evt instanceof SslHandshakeCompletionEvent) {
                            clientException = ((SslHandshakeCompletionEvent) evt).cause();
                            clientLatch.countDown();
                        }
                        ctx.fireUserEventTriggered(evt);
                    }
                    @Override
                    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
                        if (cause.getCause() instanceof SSLException) {
                            clientException = cause.getCause();
                            clientLatch.countDown();
                        } else {
                            ctx.fireExceptionCaught(cause);
                        }
                    }
                });
            }
        });
        // Bind to an ephemeral port and connect the client to it synchronously.
        serverChannel = sb.bind(new InetSocketAddress(0)).sync().channel();
        int port = ((InetSocketAddress) serverChannel.localAddress()).getPort();
        ChannelFuture ccf = cb.connect(new InetSocketAddress(NetUtil.LOCALHOST, port));
        assertTrue(ccf.awaitUninterruptibly().isSuccess());
        clientChannel = ccf.channel();
    }
protected static void rethrowIfNotNull(Throwable error) {
if (error != null) {
fail("Expected no error", error);
}
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testClientHostnameValidationSuccess(SSLEngineTestParam param) throws Exception {
mySetupClientHostnameValidation(param, ResourcesUtil.getFile(getClass(), "localhost_server.pem"),
ResourcesUtil.getFile(getClass(), "localhost_server.key"),
ResourcesUtil.getFile(getClass(), "mutual_auth_ca.pem"),
false);
assertTrue(clientLatch.await(10, TimeUnit.SECONDS));
rethrowIfNotNull(clientException);
assertTrue(serverLatch.await(5, TimeUnit.SECONDS));
rethrowIfNotNull(serverException);
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testClientHostnameValidationFail(SSLEngineTestParam param) throws Exception {
Future<Void> clientWriteFuture =
mySetupClientHostnameValidation(param, ResourcesUtil.getFile(getClass(), "notlocalhost_server.pem"),
ResourcesUtil.getFile(getClass(), "notlocalhost_server.key"),
ResourcesUtil.getFile(getClass(), "mutual_auth_ca.pem"),
true);
assertTrue(clientLatch.await(10, TimeUnit.SECONDS));
assertTrue(mySetupMutualAuthServerIsValidClientException(clientException),
"unexpected exception: " + clientException);
assertTrue(serverLatch.await(5, TimeUnit.SECONDS));
assertTrue(mySetupMutualAuthServerIsValidServerException(serverException),
"unexpected exception: " + serverException);
// Verify that any pending writes are failed with the cached handshake exception and not a general SSLException.
clientWriteFuture.awaitUninterruptibly();
Throwable actualCause = clientWriteFuture.cause();
assertSame(clientException, actualCause);
}
private Future<Void> mySetupClientHostnameValidation(final SSLEngineTestParam param, File serverCrtFile,
File serverKeyFile,
File clientTrustCrtFile,
final boolean failureExpected)
throws SSLException, InterruptedException {
final String expectedHost = "localhost";
serverSslCtx = wrapContext(param, SslContextBuilder.forServer(serverCrtFile, serverKeyFile, null)
.sslProvider(sslServerProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.sslContextProvider(serverSslContextProvider())
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.ciphers(null, IdentityCipherSuiteFilter.INSTANCE)
.sessionCacheSize(0)
.sessionTimeout(0)
.build());
clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
.sslProvider(sslClientProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.sslContextProvider(clientSslContextProvider())
.trustManager(clientTrustCrtFile)
.ciphers(null, IdentityCipherSuiteFilter.INSTANCE)
.sessionCacheSize(0)
.sessionTimeout(0)
.build());
serverConnectedChannel = null;
sb = new ServerBootstrap();
cb = new Bootstrap();
sb.group(new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory()));
sb.channel(NioServerSocketChannel.class);
sb.childHandler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
ch.config().setAllocator(new TestByteBufAllocator(ch.config().getAllocator(), param.type));
ChannelPipeline p = ch.pipeline();
SslHandler handler = !param.delegate ? serverSslCtx.newHandler(ch.alloc()) :
serverSslCtx.newHandler(ch.alloc(), delegatingExecutor);
p.addLast(handler);
p.addLast(new MessageDelegatorChannelHandler(serverReceiver, serverLatch));
p.addLast(new ChannelInboundHandlerAdapter() {
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt == SslHandshakeCompletionEvent.SUCCESS) {
if (failureExpected) {
serverException = new IllegalStateException("handshake complete. expected failure");
}
serverLatch.countDown();
} else if (evt instanceof SslHandshakeCompletionEvent) {
serverException = ((SslHandshakeCompletionEvent) evt).cause();
serverLatch.countDown();
}
ctx.fireUserEventTriggered(evt);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
if (cause.getCause() instanceof SSLHandshakeException) {
serverException = cause.getCause();
serverLatch.countDown();
} else {
serverException = cause;
ctx.fireExceptionCaught(cause);
}
}
});
serverConnectedChannel = ch;
}
});
final Promise<Void> clientWritePromise = ImmediateEventExecutor.INSTANCE.newPromise();
cb.group(new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory()));
cb.channel(NioSocketChannel.class);
cb.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
ch.config().setAllocator(new TestByteBufAllocator(ch.config().getAllocator(), param.type));
ChannelPipeline p = ch.pipeline();
InetSocketAddress remoteAddress = (InetSocketAddress) serverChannel.localAddress();
SslHandler sslHandler = !param.delegate ?
clientSslCtx.newHandler(ch.alloc(), expectedHost, 0) :
clientSslCtx.newHandler(ch.alloc(), expectedHost, 0, delegatingExecutor);
SSLParameters parameters = sslHandler.engine().getSSLParameters();
if (SslUtils.isValidHostNameForSNI(expectedHost)) {
assertEquals(1, parameters.getServerNames().size());
assertEquals(new SNIHostName(expectedHost), parameters.getServerNames().get(0));
}
parameters.setEndpointIdentificationAlgorithm("HTTPS");
sslHandler.engine().setSSLParameters(parameters);
p.addLast(sslHandler);
p.addLast(new MessageDelegatorChannelHandler(clientReceiver, clientLatch));
p.addLast(new ChannelInboundHandlerAdapter() {
@Override
public void handlerAdded(ChannelHandlerContext ctx) {
// Only write if there is a failure expected. We don't actually care about the write going
// through we just want to verify the local failure condition. This way we don't have to worry
// about verifying the payload and releasing the content on the server side.
if (failureExpected) {
ChannelFuture f = ctx.write(ctx.alloc().buffer(1).writeByte(1));
PromiseNotifier.cascade(f, clientWritePromise);
}
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt == SslHandshakeCompletionEvent.SUCCESS) {
if (failureExpected) {
clientException = new IllegalStateException("handshake complete. expected failure");
}
clientLatch.countDown();
} else if (evt instanceof SslHandshakeCompletionEvent) {
clientException = ((SslHandshakeCompletionEvent) evt).cause();
clientLatch.countDown();
}
ctx.fireUserEventTriggered(evt);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
if (cause.getCause() instanceof SSLHandshakeException) {
clientException = cause.getCause();
clientLatch.countDown();
} else {
ctx.fireExceptionCaught(cause);
}
}
});
}
});
serverChannel = sb.bind(new InetSocketAddress(expectedHost, 0)).sync().channel();
final int port = ((InetSocketAddress) serverChannel.localAddress()).getPort();
ChannelFuture ccf = cb.connect(new InetSocketAddress(expectedHost, port));
assertTrue(ccf.awaitUninterruptibly().isSuccess());
clientChannel = ccf.channel();
return clientWritePromise;
}
    // Convenience overload: server and client present the same certificate/key pair and trust
    // the same certificate chain.
    private void mySetupMutualAuth(SSLEngineTestParam param, File keyFile, File crtFile, String keyPassword)
            throws SSLException, InterruptedException {
        mySetupMutualAuth(param, crtFile, keyFile, crtFile, keyPassword, crtFile, keyFile, crtFile, keyPassword);
    }
private void verifySSLSessionForMutualAuth(
SSLEngineTestParam param, SSLSession session, File certFile, String principalName)
throws Exception {
InputStream in = null;
try {
assertEquals(principalName, session.getLocalPrincipal().getName());
assertEquals(principalName, session.getPeerPrincipal().getName());
assertNotNull(session.getId());
assertEquals(param.combo().cipher, session.getCipherSuite());
assertEquals(param.combo().protocol, session.getProtocol());
assertTrue(session.getApplicationBufferSize() > 0);
assertTrue(session.getCreationTime() > 0);
assertTrue(session.isValid());
assertTrue(session.getLastAccessedTime() > 0);
in = new FileInputStream(certFile);
final byte[] certBytes = SslContext.X509_CERT_FACTORY
.generateCertificate(in).getEncoded();
// Verify session
assertEquals(1, session.getPeerCertificates().length);
assertArrayEquals(certBytes, session.getPeerCertificates()[0].getEncoded());
try {
assertEquals(1, session.getPeerCertificateChain().length);
assertArrayEquals(certBytes, session.getPeerCertificateChain()[0].getEncoded());
} catch (UnsupportedOperationException e) {
// See https://bugs.openjdk.java.net/browse/JDK-8241039
assertTrue(PlatformDependent.javaVersion() >= 15);
}
assertEquals(1, session.getLocalCertificates().length);
assertArrayEquals(certBytes, session.getLocalCertificates()[0].getEncoded());
} finally {
if (in != null) {
in.close();
}
}
}
private void mySetupMutualAuth(final SSLEngineTestParam param,
File servertTrustCrtFile, File serverKeyFile, final File serverCrtFile, String serverKeyPassword,
File clientTrustCrtFile, File clientKeyFile, final File clientCrtFile, String clientKeyPassword)
throws InterruptedException, SSLException {
serverSslCtx = wrapContext(param, SslContextBuilder.forServer(serverCrtFile, serverKeyFile, serverKeyPassword)
.sslProvider(sslServerProvider())
.sslContextProvider(serverSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.trustManager(servertTrustCrtFile)
.ciphers(null, IdentityCipherSuiteFilter.INSTANCE)
.sessionCacheSize(0)
.sessionTimeout(0).build());
clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
.sslProvider(sslClientProvider())
.sslContextProvider(clientSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.trustManager(clientTrustCrtFile)
.keyManager(clientCrtFile, clientKeyFile, clientKeyPassword)
.ciphers(null, IdentityCipherSuiteFilter.INSTANCE)
.sessionCacheSize(0)
.endpointIdentificationAlgorithm(null)
.sessionTimeout(0).build());
serverConnectedChannel = null;
sb = new ServerBootstrap();
cb = new Bootstrap();
sb.group(new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory()));
sb.channel(NioServerSocketChannel.class);
sb.childHandler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) {
ch.config().setAllocator(new TestByteBufAllocator(ch.config().getAllocator(), param.type));
ChannelPipeline p = ch.pipeline();
final SSLEngine engine = wrapEngine(serverSslCtx.newEngine(ch.alloc()));
engine.setUseClientMode(false);
engine.setNeedClientAuth(true);
p.addLast(new SslHandler(engine));
p.addLast(new MessageDelegatorChannelHandler(serverReceiver, serverLatch));
p.addLast(new ChannelInboundHandlerAdapter() {
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
if (cause.getCause() instanceof SSLHandshakeException) {
serverException = cause.getCause();
serverLatch.countDown();
} else {
serverException = cause;
ctx.fireExceptionCaught(cause);
}
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt == SslHandshakeCompletionEvent.SUCCESS) {
try {
verifySSLSessionForMutualAuth(
param, engine.getSession(), serverCrtFile, PRINCIPAL_NAME);
} catch (Throwable cause) {
serverException = cause;
}
}
}
});
serverConnectedChannel = ch;
}
});
cb.group(new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory()));
cb.channel(NioSocketChannel.class);
cb.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
ch.config().setAllocator(new TestByteBufAllocator(ch.config().getAllocator(), param.type));
final SslHandler handler = !param.delegate ?
clientSslCtx.newHandler(ch.alloc()) :
clientSslCtx.newHandler(ch.alloc(), delegatingExecutor);
handler.engine().setNeedClientAuth(true);
ChannelPipeline p = ch.pipeline();
p.addLast(handler);
p.addLast(new MessageDelegatorChannelHandler(clientReceiver, clientLatch));
p.addLast(new ChannelInboundHandlerAdapter() {
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt == SslHandshakeCompletionEvent.SUCCESS) {
try {
verifySSLSessionForMutualAuth(
param, handler.engine().getSession(), clientCrtFile, PRINCIPAL_NAME);
} catch (Throwable cause) {
clientException = cause;
}
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
if (cause.getCause() instanceof SSLHandshakeException) {
clientException = cause.getCause();
clientLatch.countDown();
} else {
ctx.fireExceptionCaught(cause);
}
}
});
}
});
serverChannel = sb.bind(new InetSocketAddress(0)).sync().channel();
int port = ((InetSocketAddress) serverChannel.localAddress()).getPort();
ChannelFuture ccf = cb.connect(new InetSocketAddress(NetUtil.LOCALHOST, port));
assertTrue(ccf.awaitUninterruptibly().isSuccess());
clientChannel = ccf.channel();
}
protected void runTest(String expectedApplicationProtocol) throws Exception {
final ByteBuf clientMessage = Unpooled.copiedBuffer("I am a client".getBytes());
final ByteBuf serverMessage = Unpooled.copiedBuffer("I am a server".getBytes());
try {
writeAndVerifyReceived(clientMessage.retain(), clientChannel, serverLatch, serverReceiver);
writeAndVerifyReceived(serverMessage.retain(), serverConnectedChannel, clientLatch, clientReceiver);
verifyApplicationLevelProtocol(clientChannel, expectedApplicationProtocol);
verifyApplicationLevelProtocol(serverConnectedChannel, expectedApplicationProtocol);
} finally {
clientMessage.release();
serverMessage.release();
}
}
private static void verifyApplicationLevelProtocol(Channel channel, String expectedApplicationProtocol) {
SslHandler handler = channel.pipeline().get(SslHandler.class);
assertNotNull(handler);
String appProto = handler.applicationProtocol();
assertEquals(expectedApplicationProtocol, appProto);
SSLEngine engine = handler.engine();
if (engine instanceof JdkAlpnSslEngine) {
// Also verify the Java9 exposed method.
JdkAlpnSslEngine java9SslEngine = (JdkAlpnSslEngine) engine;
assertEquals(expectedApplicationProtocol == null ? StringUtil.EMPTY_STRING : expectedApplicationProtocol,
java9SslEngine.getApplicationProtocol());
}
}
private static void writeAndVerifyReceived(ByteBuf message, Channel sendChannel, CountDownLatch receiverLatch,
MessageReceiver receiver) throws Exception {
List<ByteBuf> dataCapture = null;
try {
assertTrue(sendChannel.writeAndFlush(message).await(10, TimeUnit.SECONDS));
receiverLatch.await(5, TimeUnit.SECONDS);
message.resetReaderIndex();
assertFalse(receiver.messages.isEmpty());
dataCapture = new ArrayList<ByteBuf>();
receiver.messages.drainTo(dataCapture);
assertEquals(message, dataCapture.get(0));
} finally {
if (dataCapture != null) {
for (ByteBuf data : dataCapture) {
data.release();
}
}
}
}
@Test
public void testGetCreationTime() throws Exception {
clientSslCtx = wrapContext(null, SslContextBuilder.forClient()
.sslProvider(sslClientProvider())
.sslContextProvider(clientSslContextProvider()).build());
SSLEngine engine = null;
try {
engine = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
assertTrue(engine.getSession().getCreationTime() <= System.currentTimeMillis());
} finally {
cleanupClientSslEngine(engine);
}
}
    /**
     * Verifies that invalidating an {@link SSLSession} after a completed handshake actually
     * marks it as invalid.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testSessionInvalidate(SSLEngineTestParam param) throws Exception {
        clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
                .trustManager(InsecureTrustManagerFactory.INSTANCE)
                .sslProvider(sslClientProvider())
                .sslContextProvider(clientSslContextProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
        serverSslCtx = wrapContext(param, SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey())
                .sslProvider(sslServerProvider())
                .sslContextProvider(serverSslContextProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SSLEngine clientEngine = null;
        SSLEngine serverEngine = null;
        try {
            clientEngine = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
            serverEngine = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
            // Perform the full in-memory handshake between the two engines.
            handshake(param.type(), param.delegate(), clientEngine, serverEngine);
            SSLSession session = serverEngine.getSession();
            assertTrue(session.isValid(), () -> "session should be valid: " + session);
            session.invalidate();
            assertFalse(session.isValid(), () -> "session should be invalid: " + session);
        } finally {
            cleanupClientSslEngine(clientEngine);
            cleanupServerSslEngine(serverEngine);
        }
    }
    /**
     * Verifies the behavior of {@link SSLSession#getId()}: empty before the handshake, and after
     * it either matching ids (classic TLS), ticket-derived ids (OpenSSL with tickets), or
     * non-matching pseudo ids (TLSv1.3).
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testSSLSessionId(SSLEngineTestParam param) throws Exception {
        clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
                .trustManager(InsecureTrustManagerFactory.INSTANCE)
                .sslProvider(sslClientProvider())
                // This test only works for non TLSv1.3 for now
                .protocols(param.protocols())
                .sslContextProvider(clientSslContextProvider())
                .build());
        SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
        serverSslCtx = wrapContext(param, SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey())
                .sslProvider(sslServerProvider())
                // This test only works for non TLSv1.3 for now
                .protocols(param.protocols())
                .sslContextProvider(serverSslContextProvider())
                .build());
        SSLEngine clientEngine = null;
        SSLEngine serverEngine = null;
        try {
            clientEngine = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
            serverEngine = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
            // Before the handshake the id should have length == 0
            assertEquals(0, clientEngine.getSession().getId().length);
            assertEquals(0, serverEngine.getSession().getId().length);
            handshake(param.type(), param.delegate(), clientEngine, serverEngine);
            if (param.protocolCipherCombo == ProtocolCipherCombo.TLSV13) {
                // Allocate something which is big enough for sure
                ByteBuffer packetBuffer = allocateBuffer(param.type(), 32 * 1024);
                ByteBuffer appBuffer = allocateBuffer(param.type(), 32 * 1024);
                appBuffer.clear().position(4).flip();
                packetBuffer.clear();
                // Keep pumping a few application bytes back and forth until the client session
                // exposes a non-empty id — presumably driven by post-handshake session messages
                // arriving from the server (TODO confirm against the provider implementations).
                do {
                    SSLEngineResult result;
                    // Server wrap: drain the 4-byte app payload into TLS records.
                    do {
                        result = serverEngine.wrap(appBuffer, packetBuffer);
                    } while (appBuffer.hasRemaining() || result.bytesProduced() > 0);
                    appBuffer.clear();
                    packetBuffer.flip();
                    // Client unwrap: consume everything the server produced.
                    do {
                        result = clientEngine.unwrap(packetBuffer, appBuffer);
                    } while (packetBuffer.hasRemaining() || result.bytesProduced() > 0);
                    packetBuffer.clear();
                    appBuffer.clear().position(4).flip();
                    // Client wrap followed by server unwrap — the reverse direction.
                    do {
                        result = clientEngine.wrap(appBuffer, packetBuffer);
                    } while (appBuffer.hasRemaining() || result.bytesProduced() > 0);
                    appBuffer.clear();
                    packetBuffer.flip();
                    do {
                        result = serverEngine.unwrap(packetBuffer, appBuffer);
                    } while (packetBuffer.hasRemaining() || result.bytesProduced() > 0);
                    packetBuffer.clear();
                    appBuffer.clear().position(4).flip();
                } while (clientEngine.getSession().getId().length == 0);
                // With TLS1.3 we should see pseudo IDs and so these should never match.
                assertFalse(Arrays.equals(clientEngine.getSession().getId(), serverEngine.getSession().getId()));
            } else if (OpenSslEngineTestParam.isUsingTickets(param)) {
                // After the handshake the client should have ticket ids
                assertNotEquals(0, clientEngine.getSession().getId().length);
            } else {
                // After the handshake the id should have length > 0
                assertNotEquals(0, clientEngine.getSession().getId().length);
                assertNotEquals(0, serverEngine.getSession().getId().length);
                assertArrayEquals(clientEngine.getSession().getId(), serverEngine.getSession().getId());
            }
        } finally {
            cleanupClientSslEngine(clientEngine);
            cleanupServerSslEngine(serverEngine);
        }
    }
    /**
     * Regression test: a client-initiated renegotiation that ends in a fatal alert must not make
     * the server spin forever; the test passes once the server observes the connection closing.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    @Timeout(30)
    public void clientInitiatedRenegotiationWithFatalAlertDoesNotInfiniteLoopServer(final SSLEngineTestParam param)
            throws Exception {
        assumeTrue(PlatformDependent.javaVersion() >= 11);
        final SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
        serverSslCtx = wrapContext(param, SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey())
                .sslProvider(sslServerProvider())
                .sslContextProvider(serverSslContextProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        sb = new ServerBootstrap()
                .group(new MultiThreadIoEventLoopGroup(1, NioIoHandler.newFactory()))
                .channel(NioServerSocketChannel.class)
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) {
                        ch.config().setAllocator(new TestByteBufAllocator(ch.config().getAllocator(), param.type));
                        ChannelPipeline p = ch.pipeline();
                        SslHandler handler = !param.delegate ?
                                serverSslCtx.newHandler(ch.alloc()) :
                                serverSslCtx.newHandler(ch.alloc(), delegatingExecutor);
                        p.addLast(handler);
                        p.addLast(new ChannelInboundHandlerAdapter() {
                            @Override
                            public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
                                if (evt instanceof SslHandshakeCompletionEvent &&
                                        ((SslHandshakeCompletionEvent) evt).isSuccess()) {
                                    // This data will be sent to the client before any of the re-negotiation data can be
                                    // sent. The client will read this, detect that it is not the response to
                                    // renegotiation which was expected, and respond with a fatal alert.
                                    ctx.writeAndFlush(ctx.alloc().buffer(1).writeByte(100));
                                }
                                ctx.fireUserEventTriggered(evt);
                            }
                            @Override
                            public void channelRead(final ChannelHandlerContext ctx, Object msg) {
                                ReferenceCountUtil.release(msg);
                                // The server then attempts to trigger a flush operation once the application data is
                                // received from the client. The flush will encrypt all data and should not result in
                                // deadlock.
                                ctx.channel().eventLoop().schedule(new Runnable() {
                                    @Override
                                    public void run() {
                                        ctx.writeAndFlush(ctx.alloc().buffer(1).writeByte(101));
                                    }
                                }, 500, TimeUnit.MILLISECONDS);
                            }
                            @Override
                            public void channelInactive(ChannelHandlerContext ctx) {
                                // The test completes when the server sees the connection close.
                                serverLatch.countDown();
                            }
                        });
                        serverConnectedChannel = ch;
                    }
                });
        serverChannel = sb.bind(new InetSocketAddress(0)).syncUninterruptibly().channel();
        clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
                // OpenSslEngine doesn't support renegotiation on client side
                .sslProvider(SslProvider.JDK)
                .trustManager(InsecureTrustManagerFactory.INSTANCE)
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        cb = new Bootstrap();
        cb.group(new MultiThreadIoEventLoopGroup(1, NioIoHandler.newFactory()))
                .channel(NioSocketChannel.class)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) {
                        ch.config().setAllocator(new TestByteBufAllocator(ch.config().getAllocator(), param.type()));
                        ChannelPipeline p = ch.pipeline();
                        SslHandler sslHandler = !param.delegate ?
                                clientSslCtx.newHandler(ch.alloc()) :
                                clientSslCtx.newHandler(ch.alloc(), delegatingExecutor);
                        // The renegotiate is not expected to succeed, so we should stop trying in a timely manner so
                        // the unit test can terminate relatively quickly.
                        sslHandler.setHandshakeTimeout(1, TimeUnit.SECONDS);
                        p.addLast(sslHandler);
                        p.addLast(new ChannelInboundHandlerAdapter() {
                            private int handshakeCount;
                            @Override
                            public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
                                // OpenSSL SSLEngine sends a fatal alert for the renegotiation handshake because the
                                // user data read as part of the handshake. The client receives this fatal alert and is
                                // expected to shutdown the connection. The "invalid data" during the renegotiation
                                // handshake is also delivered to channelRead(..) on the server.
                                // JDK SSLEngine completes the renegotiation handshake and the "invalid data"
                                // is also delivered to channelRead(..) on the server. JDK SSLEngine does not send a
                                // fatal error and so for testing purposes we close the connection after we have
                                // completed the first renegotiation handshake (which is the second handshake).
                                if (evt instanceof SslHandshakeCompletionEvent && ++handshakeCount == 2) {
                                    ctx.close();
                                    return;
                                }
                                ctx.fireUserEventTriggered(evt);
                            }
                            @Override
                            public void channelRead(ChannelHandlerContext ctx, Object msg) {
                                ReferenceCountUtil.release(msg);
                                // Simulate a request that the server's application logic will think is invalid.
                                ctx.writeAndFlush(ctx.alloc().buffer(1).writeByte(102));
                                ctx.pipeline().get(SslHandler.class).renegotiate();
                            }
                        });
                    }
                });
        ChannelFuture ccf = cb.connect(serverChannel.localAddress());
        assertTrue(ccf.syncUninterruptibly().isSuccess());
        clientChannel = ccf.channel();
        serverLatch.await();
    }
    /**
     * Verifies that a protocol can be re-enabled after all protocols were disabled.
     *
     * @param protocols1 the protocols expected to remain enabled after disabling everything
     *                   (per the inline note below, SSLv2Hello may be impossible to disable)
     * @param protocols2 the protocols expected after re-enabling TLSv1.2
     */
    protected void testEnablingAnAlreadyDisabledSslProtocol(SSLEngineTestParam param,
        String[] protocols1, String[] protocols2) throws Exception {
        SSLEngine sslEngine = null;
        try {
            File serverKeyFile =  ResourcesUtil.getFile(getClass(), "test_unencrypted.pem");
            File serverCrtFile = ResourcesUtil.getFile(getClass(), "test.crt");
            serverSslCtx = wrapContext(param, SslContextBuilder.forServer(serverCrtFile, serverKeyFile)
               .sslProvider(sslServerProvider())
               .sslContextProvider(serverSslContextProvider())
               .protocols(param.protocols())
               .ciphers(param.ciphers())
               .build());
            sslEngine = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
            // Disable all protocols
            sslEngine.setEnabledProtocols(EmptyArrays.EMPTY_STRINGS);
            // The only protocol that should be enabled is SSLv2Hello
            String[] enabledProtocols = sslEngine.getEnabledProtocols();
            assertArrayEquals(protocols1, enabledProtocols);
            // Enable a protocol that is currently disabled
            sslEngine.setEnabledProtocols(new String[]{ SslProtocols.TLS_v1_2 });
            // The protocol that was just enabled should be returned
            enabledProtocols = sslEngine.getEnabledProtocols();
            assertEquals(protocols2.length, enabledProtocols.length);
            assertArrayEquals(protocols2, enabledProtocols);
        } finally {
            if (sslEngine != null) {
                // Shut down both directions before handing the engine to provider-specific cleanup.
                sslEngine.closeInbound();
                sslEngine.closeOutbound();
                cleanupServerSslEngine(sslEngine);
            }
        }
    }
    /**
     * Drives a full TLS handshake between {@code clientEngine} and {@code serverEngine} in-memory,
     * shuttling wrap/unwrap output through heap or direct buffers of the given {@code type}.
     * Asserts the byte-accounting invariants of every {@link SSLEngineResult} along the way and
     * grows destination buffers on {@code BUFFER_OVERFLOW}.
     *
     * @param type    buffer allocation strategy used for all intermediate buffers
     * @param delegate whether NEED_TASK work is run on {@code delegatingExecutor} instead of inline
     * @throws Exception if the handshake fails or an invariant assertion trips
     */
    protected void handshake(BufferType type, boolean delegate, SSLEngine clientEngine, SSLEngine serverEngine)
            throws Exception {
        // cTOs / sTOc carry encrypted records client->server and server->client respectively.
        ByteBuffer cTOs = allocateBuffer(type, clientEngine.getSession().getPacketBufferSize());
        ByteBuffer sTOc = allocateBuffer(type, serverEngine.getSession().getPacketBufferSize());
        ByteBuffer serverAppReadBuffer = allocateBuffer(type, serverEngine.getSession().getApplicationBufferSize());
        ByteBuffer clientAppReadBuffer = allocateBuffer(type, clientEngine.getSession().getApplicationBufferSize());
        // Both engines must be idle before we explicitly start the handshake.
        assertEquals(SSLEngineResult.HandshakeStatus.NOT_HANDSHAKING, clientEngine.getHandshakeStatus());
        assertEquals(SSLEngineResult.HandshakeStatus.NOT_HANDSHAKING, serverEngine.getHandshakeStatus());
        clientEngine.beginHandshake();
        serverEngine.beginHandshake();
        ByteBuffer empty = allocateBuffer(type, 0);
        SSLEngineResult clientResult;
        SSLEngineResult serverResult;
        boolean clientHandshakeFinished = false;
        boolean serverHandshakeFinished = false;
        boolean cTOsHasRemaining;
        boolean sTOcHasRemaining;
        do {
            int cTOsPos = cTOs.position();
            int sTOcPos = sTOc.position();
            if (!clientHandshakeFinished) {
                // Produce the next client handshake record (no application data is consumed).
                clientResult = clientEngine.wrap(empty, cTOs);
                runDelegatedTasks(delegate, clientResult, clientEngine);
                assertEquals(empty.remaining(), clientResult.bytesConsumed());
                assertEquals(cTOs.position() - cTOsPos, clientResult.bytesProduced());
                clientHandshakeFinished = assertHandshakeStatus(clientEngine, clientResult);
                if (clientResult.getStatus() == Status.BUFFER_OVERFLOW) {
                    cTOs = increaseDstBuffer(clientEngine.getSession().getPacketBufferSize(), type, cTOs);
                }
            }
            if (!serverHandshakeFinished) {
                serverResult = serverEngine.wrap(empty, sTOc);
                runDelegatedTasks(delegate, serverResult, serverEngine);
                assertEquals(empty.remaining(), serverResult.bytesConsumed());
                assertEquals(sTOc.position() - sTOcPos, serverResult.bytesProduced());
                serverHandshakeFinished = assertHandshakeStatus(serverEngine, serverResult);
                if (serverResult.getStatus() == Status.BUFFER_OVERFLOW) {
                    sTOc = increaseDstBuffer(serverEngine.getSession().getPacketBufferSize(), type, sTOc);
                }
            }
            // Switch the transport buffers to read mode and remember the read start positions
            // so bytesConsumed() can be verified after the unwrap calls below.
            cTOs.flip();
            sTOc.flip();
            cTOsPos = cTOs.position();
            sTOcPos = sTOc.position();
            if (!clientHandshakeFinished ||
                    // After the handshake completes it is possible we have more data that was send by the server as
                    // the server will send session updates after the handshake. In this case continue to unwrap.
                    SslProtocols.TLS_v1_3.equals(clientEngine.getSession().getProtocol())) {
                if (sTOc.hasRemaining() ||
                        // We need to special case conscrypt due a bug.
                        Conscrypt.isEngineSupported(clientEngine)) {
                    int clientAppReadBufferPos = clientAppReadBuffer.position();
                    clientResult = clientEngine.unwrap(sTOc, clientAppReadBuffer);
                    runDelegatedTasks(delegate, clientResult, clientEngine);
                    assertEquals(sTOc.position() - sTOcPos, clientResult.bytesConsumed());
                    assertEquals(clientAppReadBuffer.position() - clientAppReadBufferPos, clientResult.bytesProduced());
                    // Handshake records must never produce application data.
                    assertEquals(0, clientAppReadBuffer.position());
                    if (assertHandshakeStatus(clientEngine, clientResult)) {
                        clientHandshakeFinished = true;
                    }
                    if (clientResult.getStatus() == Status.BUFFER_OVERFLOW) {
                        clientAppReadBuffer = increaseDstBuffer(
                                clientEngine.getSession().getApplicationBufferSize(), type, clientAppReadBuffer);
                    }
                }
            } else {
                assertEquals(0, sTOc.remaining());
            }
            if (!serverHandshakeFinished) {
                if (cTOs.hasRemaining() ||
                        // We need to special case conscrypt due a bug.
                        Conscrypt.isEngineSupported(serverEngine)) {
                    int serverAppReadBufferPos = serverAppReadBuffer.position();
                    serverResult = serverEngine.unwrap(cTOs, serverAppReadBuffer);
                    runDelegatedTasks(delegate, serverResult, serverEngine);
                    assertEquals(cTOs.position() - cTOsPos, serverResult.bytesConsumed());
                    assertEquals(serverAppReadBuffer.position() - serverAppReadBufferPos, serverResult.bytesProduced());
                    assertEquals(0, serverAppReadBuffer.position());
                    serverHandshakeFinished = assertHandshakeStatus(serverEngine, serverResult);
                    if (serverResult.getStatus() == Status.BUFFER_OVERFLOW) {
                        serverAppReadBuffer = increaseDstBuffer(
                                serverEngine.getSession().getApplicationBufferSize(), type, serverAppReadBuffer);
                    }
                }
            } else {
                assertFalse(cTOs.hasRemaining());
            }
            // Preserve any unconsumed transport bytes for the next loop iteration.
            cTOsHasRemaining = compactOrClear(cTOs);
            sTOcHasRemaining = compactOrClear(sTOc);
            serverAppReadBuffer.clear();
            clientAppReadBuffer.clear();
        } while (!clientHandshakeFinished || !serverHandshakeFinished ||
                // We need to ensure we feed all the data to the engine to not end up with a corrupted state.
                // This is especially important with TLS1.3 which may produce sessions after the "main handshake" is
                // done
                cTOsHasRemaining || sTOcHasRemaining);
    }
private static boolean compactOrClear(ByteBuffer buffer) {
if (buffer.hasRemaining()) {
buffer.compact();
return true;
}
buffer.clear();
return false;
}
private ByteBuffer increaseDstBuffer(int maxBufferSize,
BufferType type, ByteBuffer dstBuffer) {
assumeFalse(maxBufferSize == dstBuffer.remaining());
// We need to increase the destination buffer
dstBuffer.flip();
ByteBuffer tmpBuffer = allocateBuffer(type, maxBufferSize + dstBuffer.remaining());
tmpBuffer.put(dstBuffer);
return tmpBuffer;
}
private static boolean assertHandshakeStatus(SSLEngine engine, SSLEngineResult result) {
if (result.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.FINISHED) {
assertEquals(SSLEngineResult.HandshakeStatus.NOT_HANDSHAKING, engine.getHandshakeStatus());
return true;
}
return false;
}
private void runDelegatedTasks(boolean delegate, SSLEngineResult result, SSLEngine engine) {
if (result.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NEED_TASK) {
for (;;) {
Runnable task = engine.getDelegatedTask();
if (task == null) {
break;
}
if (!delegate) {
task.run();
} else {
delegatingExecutor.execute(task);
}
}
}
}
    /** Returns the {@link SslProvider} implementation under test for the client side. */
    protected abstract SslProvider sslClientProvider();
    /** Returns the {@link SslProvider} implementation under test for the server side. */
    protected abstract SslProvider sslServerProvider();
    /**
     * Returns the JCA {@link Provider} to use for the client {@link SslContext}, or {@code null}
     * to use the default. Subclasses override to pin a specific provider.
     */
    protected Provider clientSslContextProvider() {
        return null;
    }
    /**
     * Returns the JCA {@link Provider} to use for the server {@link SslContext}, or {@code null}
     * to use the default. Subclasses override to pin a specific provider.
     */
    protected Provider serverSslContextProvider() {
        return null;
    }
    /**
     * Called from the test cleanup code and can be used to release the client {@code ctx} if it
     * must be done manually. The default releases any reference-counted context.
     */
    protected void cleanupClientSslContext(SslContext ctx) {
        ReferenceCountUtil.release(ctx);
    }
    /**
     * Called from the test cleanup code and can be used to release the server {@code ctx} if it
     * must be done manually. The default releases any reference-counted context.
     */
    protected void cleanupServerSslContext(SslContext ctx) {
        ReferenceCountUtil.release(ctx);
    }
    /**
     * Called whenever an SSLEngine is not wrapped by a {@link SslHandler} and inserted into a
     * pipeline, so provider-owned native resources are released.
     */
    protected void cleanupClientSslEngine(SSLEngine engine) {
        ReferenceCountUtil.release(unwrapEngine(engine));
    }
    /**
     * Called whenever an SSLEngine is not wrapped by a {@link SslHandler} and inserted into a
     * pipeline, so provider-owned native resources are released.
     */
    protected void cleanupServerSslEngine(SSLEngine engine) {
        ReferenceCountUtil.release(unwrapEngine(engine));
    }
private static SSLEngine unwrapEngine(SSLEngine engine) {
if (engine instanceof JdkSslEngine) {
return ((JdkSslEngine) engine).getWrappedEngine();
}
return engine;
}
    /**
     * Convenience overload that applies the same {@link ApplicationProtocolConfig} to both the
     * server and the client side.
     */
    protected void setupHandlers(SSLEngineTestParam param, ApplicationProtocolConfig apn)
            throws InterruptedException, SSLException, CertificateException {
        setupHandlers(param, apn, apn);
    }
    /**
     * Builds server and client {@link SslContext}s with the given application-protocol configs
     * (session caching disabled so every test handshakes from scratch) and wires up the
     * bootstraps / channels via {@link #setupHandlers(BufferType, boolean, SslContext, SslContext)}.
     */
    protected void setupHandlers(SSLEngineTestParam param,
                                 ApplicationProtocolConfig serverApn, ApplicationProtocolConfig clientApn)
            throws InterruptedException, SSLException, CertificateException {
        SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
        SslContextBuilder serverCtxBuilder = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey(), null)
                .sslProvider(sslServerProvider())
                .sslContextProvider(serverSslContextProvider())
                .ciphers(null, IdentityCipherSuiteFilter.INSTANCE)
                .applicationProtocolConfig(serverApn)
                .sessionCacheSize(0)
                .sessionTimeout(0);
        if (serverApn.protocol() == Protocol.NPN || serverApn.protocol() == Protocol.NPN_AND_ALPN) {
            // NPN is not really well supported with TLSv1.3 so force to use TLSv1.2
            // See https://github.com/openssl/openssl/issues/3665
            serverCtxBuilder.protocols(SslProtocols.TLS_v1_2);
        }
        SslContextBuilder clientCtxBuilder = SslContextBuilder.forClient()
                .sslProvider(sslClientProvider())
                .sslContextProvider(clientSslContextProvider())
                .applicationProtocolConfig(clientApn)
                .trustManager(InsecureTrustManagerFactory.INSTANCE)
                .ciphers(null, IdentityCipherSuiteFilter.INSTANCE)
                .sessionCacheSize(0)
                .sessionTimeout(0);
        if (clientApn.protocol() == Protocol.NPN || clientApn.protocol() == Protocol.NPN_AND_ALPN) {
            // NPN is not really well supported with TLSv1.3 so force to use TLSv1.2
            // See https://github.com/openssl/openssl/issues/3665
            clientCtxBuilder.protocols(SslProtocols.TLS_v1_2);
        }
        setupHandlers(param.type(), param.delegate(),
                wrapContext(param, serverCtxBuilder.build()), wrapContext(param, clientCtxBuilder.build()));
    }
    /**
     * Stores the given contexts in the test fixture, starts the server, and connects a client
     * channel to it. {@code clientChannel}/{@code serverChannel} are available afterwards.
     */
    protected void setupHandlers(final BufferType type, final boolean delegate,
                                 SslContext serverCtx, SslContext clientCtx)
            throws InterruptedException, SSLException, CertificateException {
        serverSslCtx = serverCtx;
        clientSslCtx = clientCtx;
        setupServer(type, delegate);
        // host == null: connect by address only, no SNI / endpoint identification input.
        setupClient(type, delegate, null, 0);
        ChannelFuture ccf = cb.connect(serverChannel.localAddress());
        assertTrue(ccf.syncUninterruptibly().isSuccess());
        clientChannel = ccf.channel();
    }
    /**
     * Binds a NIO server on an ephemeral port whose child pipeline contains an {@link SslHandler}
     * built from {@code serverSslCtx}, a message collector, and a handler that records
     * handshake failures into {@code serverException} / {@code serverLatch}.
     */
    private void setupServer(final BufferType type, final boolean delegate) {
        serverConnectedChannel = null;
        sb = new ServerBootstrap();
        sb.group(new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory()));
        sb.channel(NioServerSocketChannel.class);
        sb.childHandler(new ChannelInitializer<Channel>() {
            @Override
            protected void initChannel(Channel ch) throws Exception {
                // Force all allocations through the buffer type under test.
                ch.config().setAllocator(new TestByteBufAllocator(ch.config().getAllocator(), type));
                ChannelPipeline p = ch.pipeline();
                SslHandler sslHandler = !delegate ?
                        serverSslCtx.newHandler(ch.alloc()) :
                        serverSslCtx.newHandler(ch.alloc(), delegatingExecutor);
                serverSslHandshakeFuture = sslHandler.handshakeFuture();
                p.addLast(sslHandler);
                p.addLast(new MessageDelegatorChannelHandler(serverReceiver, serverLatch));
                p.addLast(new ChannelInboundHandlerAdapter() {
                    @Override
                    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
                        // Capture handshake failures for the test body; propagate everything else.
                        if (cause.getCause() instanceof SSLHandshakeException) {
                            serverException = cause.getCause();
                            serverLatch.countDown();
                        } else {
                            ctx.fireExceptionCaught(cause);
                        }
                    }
                });
                serverConnectedChannel = ch;
            }
        });
        serverChannel = sb.bind(new InetSocketAddress(0)).syncUninterruptibly().channel();
    }
    /**
     * Prepares (but does not connect) the client bootstrap. When {@code host} is non-null it is
     * passed to {@code newHandler} so SNI / endpoint identification can be exercised.
     * Handshake failures are recorded into {@code clientException} / {@code clientLatch}.
     */
    private void setupClient(final BufferType type, final boolean delegate, final String host, final int port) {
        cb = new Bootstrap();
        cb.group(new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory()));
        cb.channel(NioSocketChannel.class);
        cb.handler(new ChannelInitializer<Channel>() {
            @Override
            protected void initChannel(Channel ch) throws Exception {
                // Force all allocations through the buffer type under test.
                TestByteBufAllocator alloc = new TestByteBufAllocator(ch.config().getAllocator(), type);
                ch.config().setAllocator(alloc);
                ChannelPipeline p = ch.pipeline();
                final SslHandler sslHandler;
                if (!delegate) {
                    sslHandler = host != null ? clientSslCtx.newHandler(alloc, host, port) :
                            clientSslCtx.newHandler(alloc);
                } else {
                    sslHandler = host != null ? clientSslCtx.newHandler(alloc, host, port, delegatingExecutor) :
                            clientSslCtx.newHandler(alloc, delegatingExecutor);
                }
                clientSslHandshakeFuture = sslHandler.handshakeFuture();
                p.addLast(sslHandler);
                p.addLast(new MessageDelegatorChannelHandler(clientReceiver, clientLatch));
                p.addLast(new ChannelInboundHandlerAdapter() {
                    @Override
                    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
                        // Capture handshake failures for the test body; propagate everything else.
                        if (cause.getCause() instanceof SSLHandshakeException) {
                            clientException = cause.getCause();
                            clientLatch.countDown();
                        } else {
                            ctx.fireExceptionCaught(cause);
                        }
                    }
                    @Override
                    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
                        // Make sure a dropped connection also releases waiters on the latch.
                        clientLatch.countDown();
                    }
                });
            }
        });
    }
@MethodSource("newTestParams")
@ParameterizedTest
@Timeout(30)
public void testMutualAuthSameCertChain(final SSLEngineTestParam param) throws Exception {
SelfSignedCertificate serverCert = new SelfSignedCertificate();
SelfSignedCertificate clientCert = new SelfSignedCertificate();
serverSslCtx =
wrapContext(param, SslContextBuilder.forServer(serverCert.certificate(), serverCert.privateKey())
.trustManager(clientCert.cert())
.clientAuth(ClientAuth.REQUIRE).sslProvider(sslServerProvider())
.sslContextProvider(serverSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers()).build());
sb = new ServerBootstrap();
sb.group(new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory()));
sb.channel(NioServerSocketChannel.class);
final Promise<String> promise = sb.config().group().next().newPromise();
serverChannel = sb.childHandler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
ch.config().setAllocator(new TestByteBufAllocator(ch.config().getAllocator(), param.type()));
SslHandler sslHandler = !param.delegate ?
serverSslCtx.newHandler(ch.alloc()) :
serverSslCtx.newHandler(ch.alloc(), delegatingExecutor);
ch.pipeline().addFirst(sslHandler);
ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt instanceof SslHandshakeCompletionEvent) {
Throwable cause = ((SslHandshakeCompletionEvent) evt).cause();
if (cause == null) {
SSLSession session = ((SslHandler) ctx.pipeline().first()).engine().getSession();
Certificate[] peerCertificates = session.getPeerCertificates();
if (peerCertificates == null) {
promise.setFailure(new NullPointerException("peerCertificates"));
return;
}
try {
X509Certificate[] peerCertificateChain = session.getPeerCertificateChain();
if (peerCertificateChain == null) {
promise.setFailure(new NullPointerException("peerCertificateChain"));
} else if (peerCertificateChain.length + peerCertificates.length != 4) {
String excTxtFmt = "peerCertificateChain.length:%s, peerCertificates.length:%s";
promise.setFailure(new IllegalStateException(String.format(excTxtFmt,
peerCertificateChain.length,
peerCertificates.length)));
} else {
for (int i = 0; i < peerCertificateChain.length; i++) {
if (peerCertificateChain[i] == null || peerCertificates[i] == null) {
promise.setFailure(
new IllegalStateException("Certificate in chain is null"));
return;
}
}
promise.setSuccess(null);
}
} catch (UnsupportedOperationException e) {
// See https://bugs.openjdk.java.net/browse/JDK-8241039
assertTrue(PlatformDependent.javaVersion() >= 15);
assertEquals(2, peerCertificates.length);
for (int i = 0; i < peerCertificates.length; i++) {
if (peerCertificates[i] == null) {
promise.setFailure(
new IllegalStateException("Certificate in chain is null"));
return;
}
}
promise.setSuccess(null);
}
} else {
promise.setFailure(cause);
}
}
}
});
serverConnectedChannel = ch;
}
}).bind(new InetSocketAddress(0)).syncUninterruptibly().channel();
// We create a new chain for certificates which contains 2 certificates
ByteArrayOutputStream chainStream = new ByteArrayOutputStream();
chainStream.write(Files.readAllBytes(clientCert.certificate().toPath()));
chainStream.write(Files.readAllBytes(serverCert.certificate().toPath()));
clientSslCtx = wrapContext(param, SslContextBuilder.forClient().keyManager(
new ByteArrayInputStream(chainStream.toByteArray()),
new FileInputStream(clientCert.privateKey()))
.trustManager(new FileInputStream(serverCert.certificate()))
.sslProvider(sslClientProvider())
.sslContextProvider(clientSslContextProvider())
.endpointIdentificationAlgorithm(null)
.protocols(param.protocols())
.ciphers(param.ciphers())
.build());
cb = new Bootstrap();
cb.group(new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory()));
cb.channel(NioSocketChannel.class);
clientChannel = cb.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
ch.config().setAllocator(new TestByteBufAllocator(ch.config().getAllocator(), param.type()));
ch.pipeline().addLast(new SslHandler(wrapEngine(clientSslCtx.newEngine(ch.alloc()))));
}
}).connect(serverChannel.localAddress()).syncUninterruptibly().channel();
promise.syncUninterruptibly();
serverCert.delete();
clientCert.delete();
}
    /**
     * Verifies record-by-record unwrap semantics: two TLS frames sent together must be decrypted
     * one per unwrap call, and a too-small destination must report BUFFER_OVERFLOW without
     * consuming any input.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testUnwrapBehavior(SSLEngineTestParam param) throws Exception {
        SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
        clientSslCtx = wrapContext(param, SslContextBuilder
                .forClient()
                .trustManager(cert.cert())
                .sslProvider(sslClientProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .endpointIdentificationAlgorithm(null)
                .build());
        SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        serverSslCtx = wrapContext(param, SslContextBuilder
                .forServer(cert.certificate(), cert.privateKey())
                .sslProvider(sslServerProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        byte[] bytes = "Hello World".getBytes(CharsetUtil.US_ASCII);
        try {
            ByteBuffer plainClientOut = allocateBuffer(param.type, client.getSession().getApplicationBufferSize());
            // Sized for two full records so both frames fit into one buffer.
            ByteBuffer encryptedClientToServer = allocateBuffer(
                    param.type, server.getSession().getPacketBufferSize() * 2);
            ByteBuffer plainServerIn = allocateBuffer(param.type, server.getSession().getApplicationBufferSize());
            handshake(param.type(), param.delegate(), client, server);
            // create two TLS frames
            // first frame: "Hello"
            plainClientOut.put(bytes, 0, 5);
            plainClientOut.flip();
            SSLEngineResult result = client.wrap(plainClientOut, encryptedClientToServer);
            assertEquals(SSLEngineResult.Status.OK, result.getStatus());
            assertEquals(5, result.bytesConsumed());
            assertTrue(result.bytesProduced() > 0);
            assertFalse(plainClientOut.hasRemaining());
            // second frame: " World"
            plainClientOut.clear();
            plainClientOut.put(bytes, 5, 6);
            plainClientOut.flip();
            result = client.wrap(plainClientOut, encryptedClientToServer);
            assertEquals(SSLEngineResult.Status.OK, result.getStatus());
            assertEquals(6, result.bytesConsumed());
            assertTrue(result.bytesProduced() > 0);
            // send over to server
            encryptedClientToServer.flip();
            // try with too small output buffer first (to check BUFFER_OVERFLOW case)
            int remaining = encryptedClientToServer.remaining();
            ByteBuffer small = allocateBuffer(param.type, 3);
            result = server.unwrap(encryptedClientToServer, small);
            assertEquals(SSLEngineResult.Status.BUFFER_OVERFLOW, result.getStatus());
            // BUFFER_OVERFLOW must not consume input.
            assertEquals(remaining, encryptedClientToServer.remaining());
            // now with big enough buffer: exactly one record per unwrap call
            result = server.unwrap(encryptedClientToServer, plainServerIn);
            assertEquals(SSLEngineResult.Status.OK, result.getStatus());
            assertEquals(5, result.bytesProduced());
            assertTrue(encryptedClientToServer.hasRemaining());
            result = server.unwrap(encryptedClientToServer, plainServerIn);
            assertEquals(SSLEngineResult.Status.OK, result.getStatus());
            assertEquals(6, result.bytesProduced());
            assertFalse(encryptedClientToServer.hasRemaining());
            plainServerIn.flip();
            assertEquals(ByteBuffer.wrap(bytes), plainServerIn);
        } finally {
            cleanupClientSslEngine(client);
            cleanupServerSslEngine(server);
        }
    }
@MethodSource("newTestParams")
@ParameterizedTest
public void testProtocolMatch(SSLEngineTestParam param) throws Exception {
testProtocol(param, false, new String[] {"TLSv1.2"}, new String[] {"TLSv1", "TLSv1.1", "TLSv1.2"});
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testProtocolNoMatch(SSLEngineTestParam param) throws Exception {
testProtocol(param, true, new String[] {"TLSv1.2"}, new String[] {"TLSv1", "TLSv1.1"});
}
    /**
     * Builds engines restricted to the given protocol lists and runs a handshake, asserting
     * {@link SSLHandshakeException} when {@code handshakeFails} is {@code true} and success
     * otherwise. Note: {@code param.protocols()} is intentionally NOT applied here — the
     * explicit lists drive the test.
     */
    private void testProtocol(final SSLEngineTestParam param, boolean handshakeFails,
                              String[] clientProtocols, String[] serverProtocols)
            throws Exception {
        SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
        clientSslCtx = wrapContext(param, SslContextBuilder
                .forClient()
                .trustManager(cert.cert())
                .sslProvider(sslClientProvider())
                .protocols(clientProtocols)
                .endpointIdentificationAlgorithm(null)
                .build());
        SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        serverSslCtx = wrapContext(param, SslContextBuilder
                .forServer(cert.certificate(), cert.privateKey())
                .sslProvider(sslServerProvider())
                .protocols(serverProtocols)
                .build());
        SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        try {
            if (handshakeFails) {
                final SSLEngine clientEngine = client;
                final SSLEngine serverEngine = server;
                assertThrows(SSLHandshakeException.class, new Executable() {
                    @Override
                    public void execute() throws Throwable {
                        handshake(param.type(), param.delegate(), clientEngine, serverEngine);
                    }
                });
            } else {
                handshake(param.type(), param.delegate(), client, server);
            }
        } finally {
            cleanupClientSslEngine(client);
            cleanupServerSslEngine(server);
        }
    }
private String[] nonContiguousProtocols(SslProvider provider) {
if (provider != null) {
// conscrypt not correctly filters out TLSv1 and TLSv1.1 which is required now by the JDK.
// https://github.com/google/conscrypt/issues/1013
return new String[] { SslProtocols.TLS_v1_2 };
}
return new String[] {SslProtocols.TLS_v1_2, SslProtocols.TLS_v1};
}
    /**
     * Handshake must succeed even with a non-contiguous enabled-protocol list when both sides
     * share a single TLSv1.2 cipher (no cipher-suite filter applied).
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testHandshakeCompletesWithNonContiguousProtocolsTLSv1_2CipherOnly(SSLEngineTestParam param)
            throws Exception {
        SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
        // Select a mandatory cipher from the TLSv1.2 RFC https://www.ietf.org/rfc/rfc5246.txt so handshakes won't fail
        // due to no shared/supported cipher.
        final String sharedCipher =
                // JDK24+ does not support TLS_RSA_* ciphers by default anymore:
                // See https://www.java.com/en/configure_crypto.html
                PlatformDependent.javaVersion() >= 24 ? "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" :
                        "TLS_RSA_WITH_AES_128_CBC_SHA";
        clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
                .trustManager(InsecureTrustManagerFactory.INSTANCE)
                .ciphers(Collections.singletonList(sharedCipher))
                .protocols(nonContiguousProtocols(sslClientProvider()))
                .sslContextProvider(clientSslContextProvider())
                .sslProvider(sslClientProvider())
                .build());
        serverSslCtx = wrapContext(param, SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey())
                .ciphers(Collections.singletonList(sharedCipher))
                .protocols(nonContiguousProtocols(sslServerProvider()))
                .sslContextProvider(serverSslContextProvider())
                .sslProvider(sslServerProvider())
                .build());
        SSLEngine clientEngine = null;
        SSLEngine serverEngine = null;
        try {
            clientEngine = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
            serverEngine = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
            handshake(param.type(), param.delegate(), clientEngine, serverEngine);
        } finally {
            cleanupClientSslEngine(clientEngine);
            cleanupServerSslEngine(serverEngine);
        }
    }
    /**
     * Same as the non-contiguous protocol test above, but the shared cipher is passed through
     * {@link SupportedCipherSuiteFilter} to verify filtering does not break the handshake.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testHandshakeCompletesWithoutFilteringSupportedCipher(SSLEngineTestParam param) throws Exception {
        SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
        // Select a mandatory cipher from the TLSv1.2 RFC https://www.ietf.org/rfc/rfc5246.txt so handshakes won't fail
        // due to no shared/supported cipher.
        final String sharedCipher =
                // JDK24+ does not support TLS_RSA_* ciphers by default anymore:
                // See https://www.java.com/en/configure_crypto.html
                PlatformDependent.javaVersion() >= 24 ? "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" :
                        "TLS_RSA_WITH_AES_128_CBC_SHA";
        clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
                .trustManager(InsecureTrustManagerFactory.INSTANCE)
                .ciphers(Collections.singletonList(sharedCipher), SupportedCipherSuiteFilter.INSTANCE)
                .protocols(nonContiguousProtocols(sslClientProvider()))
                .sslContextProvider(clientSslContextProvider())
                .sslProvider(sslClientProvider())
                .build());
        serverSslCtx = wrapContext(param, SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey())
                .ciphers(Collections.singletonList(sharedCipher), SupportedCipherSuiteFilter.INSTANCE)
                .protocols(nonContiguousProtocols(sslServerProvider()))
                .sslContextProvider(serverSslContextProvider())
                .sslProvider(sslServerProvider())
                .build());
        SSLEngine clientEngine = null;
        SSLEngine serverEngine = null;
        try {
            clientEngine = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
            serverEngine = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
            handshake(param.type(), param.delegate(), clientEngine, serverEngine);
        } finally {
            cleanupClientSslEngine(clientEngine);
            cleanupServerSslEngine(serverEngine);
        }
    }
    /**
     * A single wrap() with a plaintext larger than the maximum record size must still report OK,
     * with bytesConsumed/bytesProduced matching the buffer position deltas (the engine consumes
     * at most one record's worth of plaintext per call).
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testPacketBufferSizeLimit(SSLEngineTestParam param) throws Exception {
        SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
        clientSslCtx = wrapContext(param, SslContextBuilder
                .forClient()
                .trustManager(cert.cert())
                .sslContextProvider(clientSslContextProvider())
                .sslProvider(sslClientProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .endpointIdentificationAlgorithm(null)
                .build());
        SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        serverSslCtx = wrapContext(param, SslContextBuilder
                .forServer(cert.certificate(), cert.privateKey())
                .sslContextProvider(serverSslContextProvider())
                .sslProvider(sslServerProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        try {
            // Allocate an buffer that is bigger then the max plain record size.
            ByteBuffer plainServerOut = allocateBuffer(
                    param.type(), server.getSession().getApplicationBufferSize() * 2);
            handshake(param.type(), param.delegate(), client, server);
            // Fill the whole buffer and flip it.
            plainServerOut.position(plainServerOut.capacity());
            plainServerOut.flip();
            ByteBuffer encryptedServerToClient = allocateBuffer(
                    param.type(), server.getSession().getPacketBufferSize());
            int encryptedServerToClientPos = encryptedServerToClient.position();
            int plainServerOutPos = plainServerOut.position();
            SSLEngineResult result = server.wrap(plainServerOut, encryptedServerToClient);
            assertEquals(SSLEngineResult.Status.OK, result.getStatus());
            assertEquals(plainServerOut.position() - plainServerOutPos, result.bytesConsumed());
            assertEquals(encryptedServerToClient.position() - encryptedServerToClientPos, result.bytesProduced());
        } finally {
            cleanupClientSslEngine(client);
            cleanupServerSslEngine(server);
        }
    }
    /**
     * Feeding non-TLS bytes into unwrap() while the engine expects a handshake record must throw
     * an {@link SSLException}.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testSSLEngineUnwrapNoSslRecord(SSLEngineTestParam param) throws Exception {
        clientSslCtx = wrapContext(param, SslContextBuilder
                .forClient()
                .sslContextProvider(clientSslContextProvider())
                .sslProvider(sslClientProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        final SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        try {
            // src is uninitialized application-sized garbage, not a TLS record.
            final ByteBuffer src = allocateBuffer(param.type(), client.getSession().getApplicationBufferSize());
            final ByteBuffer dst = allocateBuffer(param.type(), client.getSession().getPacketBufferSize());
            ByteBuffer empty = allocateBuffer(param.type(), 0);
            // Kick off the handshake so the engine is waiting for a server response.
            SSLEngineResult clientResult = client.wrap(empty, dst);
            assertEquals(SSLEngineResult.Status.OK, clientResult.getStatus());
            assertEquals(SSLEngineResult.HandshakeStatus.NEED_UNWRAP, clientResult.getHandshakeStatus());
            assertThrows(SSLException.class, new Executable() {
                @Override
                public void execute() throws Throwable {
                    client.unwrap(src, dst);
                }
            });
        } finally {
            cleanupClientSslEngine(client);
        }
    }
    /**
     * Calling beginHandshake() on an engine whose inbound and outbound sides were already closed
     * must throw {@link SSLException} (conscrypt throws {@link IllegalStateException} instead,
     * which is tolerated as a known bug).
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testBeginHandshakeAfterEngineClosed(SSLEngineTestParam param) throws SSLException {
        clientSslCtx = wrapContext(param, SslContextBuilder
                .forClient()
                .sslContextProvider(clientSslContextProvider())
                .sslProvider(sslClientProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        try {
            client.closeInbound();
            client.closeOutbound();
            try {
                client.beginHandshake();
                fail();
            } catch (SSLException expected) {
                // expected
            } catch (IllegalStateException e) {
                if (!Conscrypt.isEngineSupported(client)) {
                    throw e;
                }
                // Workaround for conscrypt bug
                // See https://github.com/google/conscrypt/issues/840
            }
        } finally {
            cleanupClientSslEngine(client);
        }
    }
    /**
     * Runs the begin-handshake-then-close-outbound sequence (see the private helper below) against
     * both a client and a server engine.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testBeginHandshakeCloseOutbound(SSLEngineTestParam param) throws Exception {
        SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
        clientSslCtx = wrapContext(param, SslContextBuilder
                .forClient()
                .sslContextProvider(clientSslContextProvider())
                .sslProvider(sslClientProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        serverSslCtx = wrapContext(param, SslContextBuilder
                .forServer(cert.certificate(), cert.privateKey())
                .sslContextProvider(serverSslContextProvider())
                .sslProvider(sslServerProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        try {
            testBeginHandshakeCloseOutbound(param, client);
            testBeginHandshakeCloseOutbound(param, server);
        } finally {
            cleanupClientSslEngine(client);
            cleanupServerSslEngine(server);
        }
    }
    /**
     * Closing the outbound side right after beginHandshake() must let wrap() drain any pending
     * close data (NEED_WRAP loop) and finally report status CLOSED without consuming input.
     */
    private void testBeginHandshakeCloseOutbound(SSLEngineTestParam param, SSLEngine engine) throws SSLException {
        ByteBuffer dst = allocateBuffer(param.type(), engine.getSession().getPacketBufferSize());
        ByteBuffer empty = allocateBuffer(param.type(), 0);
        engine.beginHandshake();
        engine.closeOutbound();
        SSLEngineResult result;
        for (;;) {
            result = engine.wrap(empty, dst);
            dst.flip();
            // No plaintext may be consumed; everything produced must be close/alert data.
            assertEquals(0, result.bytesConsumed());
            assertEquals(dst.remaining(), result.bytesProduced());
            if (result.getHandshakeStatus() != SSLEngineResult.HandshakeStatus.NEED_WRAP) {
                break;
            }
            dst.clear();
        }
        assertEquals(SSLEngineResult.Status.CLOSED, result.getStatus());
    }
    /**
     * Runs the close-inbound-during-handshake check (see the static helper below) against both a
     * client and a server engine.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testCloseInboundAfterBeginHandshake(SSLEngineTestParam param) throws Exception {
        SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
        clientSslCtx = wrapContext(param, SslContextBuilder
                .forClient()
                .sslContextProvider(clientSslContextProvider())
                .sslProvider(sslClientProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        serverSslCtx = wrapContext(param, SslContextBuilder
                .forServer(cert.certificate(), cert.privateKey())
                .sslContextProvider(serverSslContextProvider())
                .sslProvider(sslServerProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        try {
            testCloseInboundAfterBeginHandshake(client);
            testCloseInboundAfterBeginHandshake(server);
        } finally {
            cleanupClientSslEngine(client);
            cleanupServerSslEngine(server);
        }
    }
    /**
     * closeInbound() before a close_notify has been received must throw {@link SSLException};
     * conscrypt silently succeeds due to a known bug, which is tolerated for that provider only.
     */
    private static void testCloseInboundAfterBeginHandshake(SSLEngine engine) throws SSLException {
        engine.beginHandshake();
        try {
            engine.closeInbound();
            // Workaround for conscrypt bug
            // See https://github.com/google/conscrypt/issues/839
            if (!Conscrypt.isEngineSupported(engine)) {
                fail();
            }
        } catch (SSLException expected) {
            // expected
        }
    }
    /**
     * Walks through the complete close_notify exchange between client and server and verifies
     * the engine state flags ({@code isInboundDone()} / {@code isOutboundDone()}) as well as the
     * wrap/unwrap result (status, handshake status, consumed/produced byte counts) at every
     * step. Finally verifies that further wrap/unwrap calls on the fully closed engines are
     * harmless no-ops. Pinned to TLSv1.2 because the TLSv1.3 close sequence differs.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testCloseNotifySequence(SSLEngineTestParam param) throws Exception {
        SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
        clientSslCtx = wrapContext(param, SslContextBuilder
                .forClient()
                .trustManager(cert.cert())
                .sslContextProvider(clientSslContextProvider())
                .sslProvider(sslClientProvider())
                .endpointIdentificationAlgorithm(null)
                // This test only works for non TLSv1.3 for now
                .protocols(SslProtocols.TLS_v1_2)
                .build());
        SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        serverSslCtx = wrapContext(param, SslContextBuilder
                .forServer(cert.certificate(), cert.privateKey())
                .sslContextProvider(serverSslContextProvider())
                .sslProvider(sslServerProvider())
                // This test only works for non TLSv1.3 for now
                .protocols(SslProtocols.TLS_v1_2)
                .build());
        SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        try {
            ByteBuffer plainClientOut = allocateBuffer(param.type(), client.getSession().getApplicationBufferSize());
            ByteBuffer plainServerOut = allocateBuffer(param.type(), server.getSession().getApplicationBufferSize());
            ByteBuffer encryptedClientToServer =
                    allocateBuffer(param.type(), client.getSession().getPacketBufferSize());
            ByteBuffer encryptedServerToClient =
                    allocateBuffer(param.type(), server.getSession().getPacketBufferSize());
            ByteBuffer empty = allocateBuffer(param.type(), 0);
            handshake(param.type(), param.delegate(), client, server);
            // This will produce a close_notify
            client.closeOutbound();
            // Something still pending in the outbound buffer.
            assertFalse(client.isOutboundDone());
            assertFalse(client.isInboundDone());
            // Now wrap and so drain the outbound buffer.
            SSLEngineResult result = client.wrap(empty, encryptedClientToServer);
            encryptedClientToServer.flip();
            assertEquals(SSLEngineResult.Status.CLOSED, result.getStatus());
            SSLEngineResult.HandshakeStatus hs = result.getHandshakeStatus();
            // Need an UNWRAP to read the response of the close_notify
            if (sslClientProvider() == SslProvider.JDK || Conscrypt.isEngineSupported(client)) {
                assertTrue(hs == SSLEngineResult.HandshakeStatus.NOT_HANDSHAKING
                        || hs == SSLEngineResult.HandshakeStatus.NEED_UNWRAP);
            } else {
                assertEquals(SSLEngineResult.HandshakeStatus.NEED_UNWRAP, hs);
            }
            int produced = result.bytesProduced();
            int consumed = result.bytesConsumed();
            // Remember the size of the client's close_notify record; the assertions below expect
            // the server's answering close_notify to have the same length.
            int closeNotifyLen = produced;
            assertTrue(produced > 0);
            assertEquals(0, consumed);
            assertEquals(produced, encryptedClientToServer.remaining());
            // Outbound buffer should be drained now.
            assertTrue(client.isOutboundDone());
            assertFalse(client.isInboundDone());
            assertFalse(server.isOutboundDone());
            assertFalse(server.isInboundDone());
            result = server.unwrap(encryptedClientToServer, plainServerOut);
            plainServerOut.flip();
            assertEquals(SSLEngineResult.Status.CLOSED, result.getStatus());
            // Need a WRAP to respond to the close_notify
            assertEquals(SSLEngineResult.HandshakeStatus.NEED_WRAP, result.getHandshakeStatus());
            produced = result.bytesProduced();
            consumed = result.bytesConsumed();
            assertEquals(closeNotifyLen, consumed);
            assertEquals(0, produced);
            // Should have consumed the complete close_notify
            assertEquals(0, encryptedClientToServer.remaining());
            assertEquals(0, plainServerOut.remaining());
            assertFalse(server.isOutboundDone());
            assertTrue(server.isInboundDone());
            result = server.wrap(empty, encryptedServerToClient);
            encryptedServerToClient.flip();
            assertEquals(SSLEngineResult.Status.CLOSED, result.getStatus());
            // UNWRAP/WRAP are not expected after this point
            assertEquals(SSLEngineResult.HandshakeStatus.NOT_HANDSHAKING, result.getHandshakeStatus());
            produced = result.bytesProduced();
            consumed = result.bytesConsumed();
            assertEquals(closeNotifyLen, produced);
            assertEquals(0, consumed);
            assertEquals(produced, encryptedServerToClient.remaining());
            assertTrue(server.isOutboundDone());
            assertTrue(server.isInboundDone());
            result = client.unwrap(encryptedServerToClient, plainClientOut);
            plainClientOut.flip();
            assertEquals(SSLEngineResult.Status.CLOSED, result.getStatus());
            // UNWRAP/WRAP are not expected after this point
            assertEquals(SSLEngineResult.HandshakeStatus.NOT_HANDSHAKING, result.getHandshakeStatus());
            produced = result.bytesProduced();
            consumed = result.bytesConsumed();
            assertEquals(closeNotifyLen, consumed);
            assertEquals(0, produced);
            assertEquals(0, encryptedServerToClient.remaining());
            assertTrue(client.isOutboundDone());
            assertTrue(client.isInboundDone());
            // Ensure that calling wrap or unwrap again will not produce an SSLException
            encryptedServerToClient.clear();
            plainServerOut.clear();
            result = server.wrap(plainServerOut, encryptedServerToClient);
            assertEngineRemainsClosed(result);
            encryptedClientToServer.clear();
            plainServerOut.clear();
            result = server.unwrap(encryptedClientToServer, plainServerOut);
            assertEngineRemainsClosed(result);
            encryptedClientToServer.clear();
            plainClientOut.clear();
            result = client.wrap(plainClientOut, encryptedClientToServer);
            assertEngineRemainsClosed(result);
            encryptedServerToClient.clear();
            plainClientOut.clear();
            result = client.unwrap(encryptedServerToClient, plainClientOut);
            assertEngineRemainsClosed(result);
        } finally {
            cleanupClientSslEngine(client);
            cleanupServerSslEngine(server);
        }
    }
private static void assertEngineRemainsClosed(SSLEngineResult result) {
assertEquals(SSLEngineResult.Status.CLOSED, result.getStatus());
assertEquals(SSLEngineResult.HandshakeStatus.NOT_HANDSHAKING, result.getHandshakeStatus());
assertEquals(0, result.bytesConsumed());
assertEquals(0, result.bytesProduced());
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testWrapAfterCloseOutbound(SSLEngineTestParam param) throws Exception {
SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
clientSslCtx = wrapContext(param, SslContextBuilder
.forClient()
.trustManager(cert.cert())
.sslProvider(sslClientProvider())
.sslContextProvider(clientSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.endpointIdentificationAlgorithm(null)
.build());
SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
serverSslCtx = wrapContext(param, SslContextBuilder
.forServer(cert.certificate(), cert.privateKey())
.sslProvider(sslServerProvider())
.sslContextProvider(serverSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.build());
SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
try {
ByteBuffer dst = allocateBuffer(param.type(), client.getSession().getPacketBufferSize());
ByteBuffer src = allocateBuffer(param.type(), 1024);
handshake(param.type(), param.delegate(), client, server);
// This will produce a close_notify
client.closeOutbound();
SSLEngineResult result = client.wrap(src, dst);
assertEquals(SSLEngineResult.Status.CLOSED, result.getStatus());
assertEquals(0, result.bytesConsumed());
assertTrue(result.bytesProduced() > 0);
assertTrue(client.isOutboundDone());
assertFalse(client.isInboundDone());
} finally {
cleanupClientSslEngine(client);
cleanupServerSslEngine(server);
}
}
    /**
     * Verifies that unwrap(...) correctly handles a source buffer whose position is non-zero and
     * which contains one complete TLS record followed by a truncated second record: exactly one
     * record must be consumed.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testMultipleRecordsInOneBufferWithNonZeroPosition(SSLEngineTestParam param) throws Exception {
        SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
        clientSslCtx = wrapContext(param, SslContextBuilder
                .forClient()
                .trustManager(cert.cert())
                .sslContextProvider(clientSslContextProvider())
                .sslProvider(sslClientProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .endpointIdentificationAlgorithm(null)
                .build());
        SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        serverSslCtx = wrapContext(param, SslContextBuilder
                .forServer(cert.certificate(), cert.privateKey())
                .sslContextProvider(serverSslContextProvider())
                .sslProvider(sslServerProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        try {
            // Choose buffer size small enough that we can put multiple buffers into one buffer and pass it into the
            // unwrap call without exceed MAX_ENCRYPTED_PACKET_LENGTH.
            ByteBuffer plainClientOut = allocateBuffer(param.type(), 1024);
            ByteBuffer plainServerOut = allocateBuffer(param.type(), server.getSession().getApplicationBufferSize());
            ByteBuffer encClientToServer = allocateBuffer(param.type(), client.getSession().getPacketBufferSize());
            int positionOffset = 1;
            // We need to be able to hold 2 records + positionOffset
            ByteBuffer combinedEncClientToServer = allocateBuffer(
                    param.type(), encClientToServer.capacity() * 2 + positionOffset);
            combinedEncClientToServer.position(positionOffset);
            handshake(param.type(), param.delegate(), client, server);
            plainClientOut.limit(plainClientOut.capacity());
            SSLEngineResult result = client.wrap(plainClientOut, encClientToServer);
            assertEquals(plainClientOut.capacity(), result.bytesConsumed());
            assertTrue(result.bytesProduced() > 0);
            encClientToServer.flip();
            // Copy the first record into the combined buffer
            combinedEncClientToServer.put(encClientToServer);
            plainClientOut.clear();
            encClientToServer.clear();
            result = client.wrap(plainClientOut, encClientToServer);
            assertEquals(plainClientOut.capacity(), result.bytesConsumed());
            assertTrue(result.bytesProduced() > 0);
            encClientToServer.flip();
            int encClientToServerLen = encClientToServer.remaining();
            // Copy the second record into the combined buffer
            combinedEncClientToServer.put(encClientToServer);
            encClientToServer.clear();
            combinedEncClientToServer.flip();
            combinedEncClientToServer.position(positionOffset);
            // Ensure we have the first record and a tiny amount of the second record in the buffer
            combinedEncClientToServer.limit(
                    combinedEncClientToServer.limit() - (encClientToServerLen - positionOffset));
            result = server.unwrap(combinedEncClientToServer, plainServerOut);
            assertEquals(encClientToServerLen, result.bytesConsumed());
            assertTrue(result.bytesProduced() > 0);
        } finally {
            cleanupClientSslEngine(client);
            cleanupServerSslEngine(server);
        }
    }
    /**
     * Wraps repeatedly until the destination buffer holds more than one packet-buffer-size worth
     * of records (or wrap reports BUFFER_OVERFLOW without touching either buffer), then verifies
     * that a single unwrap consumes at most one record and leaves the rest in the source buffer.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testMultipleRecordsInOneBufferBiggerThenPacketBufferSize(SSLEngineTestParam param) throws Exception {
        SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
        clientSslCtx = wrapContext(param, SslContextBuilder
                .forClient()
                .trustManager(cert.cert())
                .sslContextProvider(clientSslContextProvider())
                .sslProvider(sslClientProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .endpointIdentificationAlgorithm(null)
                .build());
        SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        serverSslCtx = wrapContext(param, SslContextBuilder
                .forServer(cert.certificate(), cert.privateKey())
                .sslContextProvider(serverSslContextProvider())
                .sslProvider(sslServerProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        try {
            ByteBuffer plainClientOut = allocateBuffer(param.type(), 4096);
            ByteBuffer plainServerOut = allocateBuffer(param.type(), server.getSession().getApplicationBufferSize());
            ByteBuffer encClientToServer = allocateBuffer(param.type(), server.getSession().getPacketBufferSize() * 2);
            handshake(param.type(), param.delegate(), client, server);
            int srcLen = plainClientOut.remaining();
            SSLEngineResult result;
            int count = 0;
            do {
                int plainClientOutPosition = plainClientOut.position();
                int encClientToServerPosition = encClientToServer.position();
                result = client.wrap(plainClientOut, encClientToServer);
                if (result.getStatus() == Status.BUFFER_OVERFLOW) {
                    // We did not have enough room to wrap
                    assertEquals(plainClientOutPosition, plainClientOut.position());
                    assertEquals(encClientToServerPosition, encClientToServer.position());
                    break;
                }
                assertEquals(SSLEngineResult.Status.OK, result.getStatus());
                assertEquals(srcLen, result.bytesConsumed());
                assertTrue(result.bytesProduced() > 0);
                plainClientOut.clear();
                ++count;
            } while (encClientToServer.position() < server.getSession().getPacketBufferSize());
            // Check that we were able to wrap multiple times.
            assertTrue(count >= 2);
            encClientToServer.flip();
            result = server.unwrap(encClientToServer, plainServerOut);
            assertEquals(SSLEngineResult.Status.OK, result.getStatus());
            assertTrue(result.bytesConsumed() > 0);
            assertTrue(result.bytesProduced() > 0);
            // More than one record was written, so one unwrap must not drain the buffer.
            assertTrue(encClientToServer.hasRemaining());
        } finally {
            cleanupClientSslEngine(client);
            cleanupServerSslEngine(server);
        }
    }
@MethodSource("newTestParams")
@ParameterizedTest
public void testBufferUnderFlow(SSLEngineTestParam param) throws Exception {
SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
clientSslCtx = wrapContext(param, SslContextBuilder
.forClient()
.trustManager(cert.cert())
.sslContextProvider(clientSslContextProvider())
.sslProvider(sslClientProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.endpointIdentificationAlgorithm(null)
.build());
SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
serverSslCtx = wrapContext(param, SslContextBuilder
.forServer(cert.certificate(), cert.privateKey())
.sslContextProvider(serverSslContextProvider())
.sslProvider(sslServerProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.build());
SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
try {
ByteBuffer plainClient = allocateBuffer(param.type(), 1024);
plainClient.limit(plainClient.capacity());
ByteBuffer encClientToServer = allocateBuffer(param.type(), client.getSession().getPacketBufferSize());
ByteBuffer plainServer = allocateBuffer(param.type(), server.getSession().getApplicationBufferSize());
handshake(param.type(), param.delegate(), client, server);
SSLEngineResult result = client.wrap(plainClient, encClientToServer);
assertEquals(SSLEngineResult.Status.OK, result.getStatus());
assertEquals(result.bytesConsumed(), plainClient.capacity());
// Flip so we can read it.
encClientToServer.flip();
int remaining = encClientToServer.remaining();
// We limit the buffer so we have less then the header to read, this should result in an BUFFER_UNDERFLOW.
encClientToServer.limit(SSL_RECORD_HEADER_LENGTH - 1);
result = server.unwrap(encClientToServer, plainServer);
assertResultIsBufferUnderflow(result);
// We limit the buffer so we can read the header but not the rest, this should result in an
// BUFFER_UNDERFLOW.
encClientToServer.limit(SSL_RECORD_HEADER_LENGTH);
result = server.unwrap(encClientToServer, plainServer);
assertResultIsBufferUnderflow(result);
// We limit the buffer so we can read the header and partly the rest, this should result in an
// BUFFER_UNDERFLOW.
encClientToServer.limit(SSL_RECORD_HEADER_LENGTH + remaining - 1 - SSL_RECORD_HEADER_LENGTH);
result = server.unwrap(encClientToServer, plainServer);
assertResultIsBufferUnderflow(result);
// Reset limit so we can read the full record.
encClientToServer.limit(remaining);
result = server.unwrap(encClientToServer, plainServer);
assertEquals(SSLEngineResult.Status.OK, result.getStatus());
assertEquals(result.bytesConsumed(), remaining);
assertTrue(result.bytesProduced() > 0);
} finally {
cleanupClientSslEngine(client);
cleanupServerSslEngine(server);
}
}
private static void assertResultIsBufferUnderflow(SSLEngineResult result) {
assertEquals(SSLEngineResult.Status.BUFFER_UNDERFLOW, result.getStatus());
assertEquals(0, result.bytesConsumed());
assertEquals(0, result.bytesProduced());
}
    /**
     * Verifies that wrap(...) does not modify the contents of its source buffer: after
     * encrypting a buffer filled with a known byte pattern, every byte of the source must be
     * unchanged.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testWrapDoesNotZeroOutSrc(SSLEngineTestParam param) throws Exception {
        SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
        clientSslCtx = wrapContext(param, SslContextBuilder
                .forClient()
                .trustManager(cert.cert())
                .sslContextProvider(clientSslContextProvider())
                .sslProvider(sslClientProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .endpointIdentificationAlgorithm(null)
                .build());
        SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        serverSslCtx = wrapContext(param, SslContextBuilder
                .forServer(cert.certificate(), cert.privateKey())
                .sslContextProvider(serverSslContextProvider())
                .sslProvider(sslServerProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        try {
            ByteBuffer plainServerOut =
                    allocateBuffer(param.type(), server.getSession().getApplicationBufferSize() / 2);
            handshake(param.type(), param.delegate(), client, server);
            // Fill the whole buffer and flip it.
            for (int i = 0; i < plainServerOut.capacity(); i++) {
                plainServerOut.put(i, (byte) i);
            }
            plainServerOut.position(plainServerOut.capacity());
            plainServerOut.flip();
            ByteBuffer encryptedServerToClient =
                    allocateBuffer(param.type(), server.getSession().getPacketBufferSize());
            SSLEngineResult result = server.wrap(plainServerOut, encryptedServerToClient);
            assertEquals(SSLEngineResult.Status.OK, result.getStatus());
            assertTrue(result.bytesConsumed() > 0);
            // The byte pattern written above must still be intact after wrap consumed the buffer.
            for (int i = 0; i < plainServerOut.capacity(); i++) {
                assertEquals((byte) i, plainServerOut.get(i));
            }
        } finally {
            cleanupClientSslEngine(client);
            cleanupServerSslEngine(server);
        }
    }
@MethodSource("newTestParams")
@ParameterizedTest
public void testDisableProtocols(SSLEngineTestParam param) throws Exception {
testDisableProtocols(param, SslProtocols.SSL_v2, SslProtocols.SSL_v2);
testDisableProtocols(param, SslProtocols.SSL_v3, SslProtocols.SSL_v2, SslProtocols.SSL_v3);
testDisableProtocols(param, SslProtocols.TLS_v1, SslProtocols.SSL_v2, SslProtocols.SSL_v3, SslProtocols.TLS_v1);
testDisableProtocols(param,
SslProtocols.TLS_v1_1, SslProtocols.SSL_v2, SslProtocols.SSL_v3,
SslProtocols.TLS_v1, SslProtocols.TLS_v1_1);
testDisableProtocols(param, SslProtocols.TLS_v1_2, SslProtocols.SSL_v2,
SslProtocols.SSL_v3, SslProtocols.TLS_v1, SslProtocols.TLS_v1_1, SslProtocols.TLS_v1_2);
}
    /**
     * Creates a server engine, enables all supported protocols, removes
     * {@code disabledProtocols} from the set and verifies the engine reports exactly the
     * remaining protocols. The check is only performed when {@code protocol} is supported by
     * the engine at all.
     *
     * @param protocol          the protocol whose removal is being exercised
     * @param disabledProtocols the protocols to remove from the enabled set
     */
    private void testDisableProtocols(SSLEngineTestParam param,
                                      String protocol, String... disabledProtocols) throws Exception {
        SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
        SslContext ctx = wrapContext(param, SslContextBuilder
                .forServer(cert.certificate(), cert.privateKey())
                .sslContextProvider(serverSslContextProvider())
                .sslProvider(sslServerProvider())
                .protocols(param.protocols())
                .ciphers(param.ciphers())
                .build());
        SSLEngine server = wrapEngine(ctx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        try {
            Set<String> supported = new HashSet<String>(Arrays.asList(server.getSupportedProtocols()));
            if (supported.contains(protocol)) {
                server.setEnabledProtocols(server.getSupportedProtocols());
                assertEquals(supported, new HashSet<String>(Arrays.asList(server.getSupportedProtocols())));
                for (String disabled : disabledProtocols) {
                    supported.remove(disabled);
                }
                if (supported.contains(SslProtocols.SSL_v2_HELLO) && supported.size() == 1) {
                    // It's not allowed to set only PROTOCOL_SSL_V2_HELLO if using JDK SSLEngine.
                    return;
                }
                server.setEnabledProtocols(supported.toArray(new String[0]));
                assertEquals(supported, new HashSet<String>(Arrays.asList(server.getEnabledProtocols())));
                server.setEnabledProtocols(server.getSupportedProtocols());
            }
        } finally {
            cleanupServerSslEngine(server);
            // NOTE(review): a server context is released via cleanupClientSslContext() here,
            // which looks inconsistent with the naming - confirm both cleanup methods behave
            // identically for contexts.
            cleanupClientSslContext(ctx);
        }
    }
    /**
     * Hostname verification must be enforced even with an all-trusting TrustManager; this
     * variant does not send SNI, so the handshake is expected to fail.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testUsingX509TrustManagerVerifiesHostname(SSLEngineTestParam param) throws Exception {
        testUsingX509TrustManagerVerifiesHostname(param, false);
    }
    /**
     * Hostname verification must be enforced even with an all-trusting TrustManager; this
     * variant sends the certificate's FQDN via SNI, so the handshake is expected to succeed.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testUsingX509TrustManagerVerifiesSNIHostname(SSLEngineTestParam param) throws Exception {
        testUsingX509TrustManagerVerifiesHostname(param, true);
    }
    /**
     * Verifies that endpoint identification ("HTTPS") is applied even when a custom
     * all-trusting X509TrustManager is used: the handshake against an engine created for
     * "127.0.0.1" must fail for a certificate issued to "something.netty.io", unless the client
     * supplies the matching hostname via SNI.
     *
     * @param useSNI whether the client sends the certificate's FQDN via SNI; if {@code true}
     *               the handshake is expected to succeed, otherwise to fail with SSLException
     */
    private void testUsingX509TrustManagerVerifiesHostname(SSLEngineTestParam param, boolean useSNI) throws Exception {
        if (clientSslContextProvider() != null) {
            // Not supported when using conscrypt
            return;
        }
        String fqdn = "something.netty.io";
        X509Bundle cert = new CertificateBuilder()
                .subject("CN=" + fqdn)
                .setIsCertificateAuthority(true)
                .buildSelfSigned();
        TrustManagerFactory clientTrustManagerFactory = new TrustManagerFactory(new TrustManagerFactorySpi() {
            @Override
            protected void engineInit(KeyStore keyStore) {
                // NOOP
            }
            @Override
            protected TrustManager[] engineGetTrustManagers() {
                // Provide a custom trust manager; this manager trusts all certificates, so any
                // handshake failure below must come from hostname verification, not trust checks.
                return new TrustManager[]{
                        new X509TrustManager() {
                            @Override
                            public void checkClientTrusted(
                                    java.security.cert.X509Certificate[] x509Certificates, String s) {
                                // NOOP
                            }
                            @Override
                            public void checkServerTrusted(
                                    java.security.cert.X509Certificate[] x509Certificates, String s) {
                                // NOOP
                            }
                            @Override
                            public java.security.cert.X509Certificate[] getAcceptedIssuers() {
                                return EmptyArrays.EMPTY_X509_CERTIFICATES;
                            }
                        }
                };
            }
            @Override
            protected void engineInit(ManagerFactoryParameters managerFactoryParameters) {
            }
        }, null, TrustManagerFactory.getDefaultAlgorithm()) {
        };
        SslContextBuilder clientSslContextBuilder = SslContextBuilder
                .forClient()
                .trustManager(clientTrustManagerFactory)
                .sslContextProvider(clientSslContextProvider())
                .sslProvider(sslClientProvider())
                .endpointIdentificationAlgorithm("HTTPS");
        if (useSNI) {
            clientSslContextBuilder.serverName(new SNIHostName(fqdn));
        }
        clientSslCtx = wrapContext(param, clientSslContextBuilder.build());
        SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT, "127.0.0.1", 1234));
        serverSslCtx = wrapContext(param, SslContextBuilder
                .forServer(cert.getKeyPair().getPrivate(), cert.getCertificatePath())
                .sslContextProvider(serverSslContextProvider())
                .sslProvider(sslServerProvider())
                .build());
        SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
        try {
            handshake(param.type(), param.delegate(), client, server);
            if (!useSNI) {
                fail();
            }
        } catch (SSLException exception) {
            if (useSNI) {
                throw exception;
            }
            // expected as the hostname does not match.
        } finally {
            cleanupClientSslEngine(client);
            cleanupServerSslEngine(server);
        }
    }
@Test
public void testInvalidCipher() throws Exception {
SelfSignedCertificate cert = CachedSelfSignedCertificate.getCachedCertificate();
List<String> cipherList = new ArrayList<String>();
Collections.addAll(cipherList, ((SSLSocketFactory) SSLSocketFactory.getDefault()).getDefaultCipherSuites());
cipherList.add("InvalidCipher");
SSLEngine server = null;
try {
serverSslCtx = wrapContext(null, SslContextBuilder.forServer(cert.key(), cert.cert())
.sslContextProvider(serverSslContextProvider())
.sslProvider(sslServerProvider())
.ciphers(cipherList).build());
server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
fail();
} catch (IllegalArgumentException | SSLException expected) {
// expected when invalid cipher is used.
} finally {
cleanupServerSslEngine(server);
}
}
    /**
     * After a successful handshake, both engines must report the same negotiated cipher suite,
     * and it must be the suite requested by the test parameter.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testGetCiphersuite(SSLEngineTestParam param) throws Exception {
        clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
                                        .trustManager(InsecureTrustManagerFactory.INSTANCE)
                                        .sslProvider(sslClientProvider())
                                        .sslContextProvider(clientSslContextProvider())
                                        .protocols(param.protocols())
                                        .ciphers(param.ciphers())
                                        .build());
        SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
        serverSslCtx = wrapContext(param, SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey())
                                        .sslProvider(sslServerProvider())
                                        .sslContextProvider(serverSslContextProvider())
                                        .protocols(param.protocols())
                                        .ciphers(param.ciphers())
                                        .build());
        SSLEngine clientEngine = null;
        SSLEngine serverEngine = null;
        try {
            clientEngine = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
            serverEngine = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
            handshake(param.type(), param.delegate(), clientEngine, serverEngine);
            String clientCipher = clientEngine.getSession().getCipherSuite();
            String serverCipher = serverEngine.getSession().getCipherSuite();
            assertEquals(clientCipher, serverCipher);
            assertEquals(param.protocolCipherCombo.cipher, clientCipher);
        } finally {
            cleanupClientSslEngine(clientEngine);
            cleanupServerSslEngine(serverEngine);
        }
    }
    /**
     * Exercises session caching: two handshakes for the same peer (the second one expected to
     * reuse the cached session) plus one for a different peer, then invalidates all sessions on
     * both session contexts and asserts they are gone.
     */
    @MethodSource("newTestParams")
    @ParameterizedTest
    public void testSessionCache(SSLEngineTestParam param) throws Exception {
        clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
                                        .trustManager(InsecureTrustManagerFactory.INSTANCE)
                                        .sslProvider(sslClientProvider())
                                        .sslContextProvider(clientSslContextProvider())
                                        .protocols(param.protocols())
                                        .ciphers(param.ciphers())
                                        .build());
        SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
        serverSslCtx = wrapContext(param, SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey())
                                        .sslProvider(sslServerProvider())
                                        .sslContextProvider(serverSslContextProvider())
                                        .protocols(param.protocols())
                                        .ciphers(param.ciphers())
                                        .build());
        doHandshakeVerifyReusedAndClose(param, "a.netty.io", 9999, false);
        doHandshakeVerifyReusedAndClose(param, "a.netty.io", 9999, true);
        doHandshakeVerifyReusedAndClose(param, "b.netty.io", 9999, false);
        invalidateSessionsAndAssert(serverSslCtx.sessionContext());
        invalidateSessionsAndAssert(clientSslCtx.sessionContext());
    }
protected void invalidateSessionsAndAssert(SSLSessionContext context) {
Enumeration<byte[]> ids = context.getIds();
while (ids.hasMoreElements()) {
byte[] id = ids.nextElement();
SSLSession session = context.getSession(id);
if (session != null) {
session.invalidate();
assertFalse(session.isValid());
assertNull(context.getSession(id));
}
}
}
private static void assertSessionCache(SSLSessionContext sessionContext, int numSessions) {
Enumeration<byte[]> ids = sessionContext.getIds();
int numIds = 0;
while (ids.hasMoreElements()) {
numIds++;
byte[] id = ids.nextElement();
assertNotEquals(0, id.length);
SSLSession session = sessionContext.getSession(id);
assertArrayEquals(id, session.getId());
}
assertEquals(numSessions, numIds);
}
    /**
     * Performs a handshake for {@code host:port} and verifies whether the TLS session was (not)
     * reused as indicated by {@code reuse}. For TLSv1.3, session tickets are delivered after the
     * handshake (RFC 8446 NewSessionTicket), so the loop keeps pumping small application records
     * in both directions until the engines report the expected reuse state / cache growth. Also
     * checks that session values and access timestamps behave as expected across reuse, then
     * closes both engines.
     */
    private void doHandshakeVerifyReusedAndClose(SSLEngineTestParam param, String host, int port, boolean reuse)
            throws Exception {
        SSLEngine clientEngine = null;
        SSLEngine serverEngine = null;
        try {
            clientEngine = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT, host, port));
            serverEngine = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
            handshake(param.type(), param.delegate(), clientEngine, serverEngine);
            int clientSessions = currentSessionCacheSize(clientSslCtx.sessionContext());
            int serverSessions = currentSessionCacheSize(serverSslCtx.sessionContext());
            int nCSessions = clientSessions;
            int nSSessions = serverSessions;
            SessionReusedState clientSessionReused = SessionReusedState.NOT_REUSED;
            SessionReusedState serverSessionReused = SessionReusedState.NOT_REUSED;
            if (param.protocolCipherCombo == ProtocolCipherCombo.TLSV13) {
                // Allocate something which is big enough for sure
                ByteBuffer packetBuffer = allocateBuffer(param.type(), 32 * 1024);
                ByteBuffer appBuffer = allocateBuffer(param.type(), 32 * 1024);
                // position(4).flip() leaves 4 bytes of dummy application data to send.
                appBuffer.clear().position(4).flip();
                packetBuffer.clear();
                do {
                    SSLEngineResult result;
                    do {
                        result = serverEngine.wrap(appBuffer, packetBuffer);
                    } while (appBuffer.hasRemaining() || result.bytesProduced() > 0);
                    appBuffer.clear();
                    packetBuffer.flip();
                    do {
                        result = clientEngine.unwrap(packetBuffer, appBuffer);
                    } while (packetBuffer.hasRemaining() || result.bytesProduced() > 0);
                    packetBuffer.clear();
                    appBuffer.clear().position(4).flip();
                    do {
                        result = clientEngine.wrap(appBuffer, packetBuffer);
                    } while (appBuffer.hasRemaining() || result.bytesProduced() > 0);
                    appBuffer.clear();
                    packetBuffer.flip();
                    do {
                        result = serverEngine.unwrap(packetBuffer, appBuffer);
                    } while (packetBuffer.hasRemaining() || result.bytesProduced() > 0);
                    packetBuffer.clear();
                    appBuffer.clear().position(4).flip();
                    nCSessions = currentSessionCacheSize(clientSslCtx.sessionContext());
                    nSSessions = currentSessionCacheSize(serverSslCtx.sessionContext());
                    clientSessionReused = isSessionReused(clientEngine);
                    serverSessionReused = isSessionReused(serverEngine);
                } while ((reuse && (clientSessionReused == SessionReusedState.NOT_REUSED ||
                        serverSessionReused == SessionReusedState.NOT_REUSED))
                        || (!reuse && (nCSessions < clientSessions ||
                        // server may use multiple sessions
                        nSSessions < serverSessions)));
            }
            assertSessionReusedForEngine(clientEngine, serverEngine, reuse);
            String key = "key";
            if (reuse) {
                if (clientSessionReused != SessionReusedState.NOT_REUSED) {
                    // We should see the previous stored value on session reuse.
                    // This is broken in conscrypt.
                    // TODO: Open an issue in the conscrypt project.
                    if (!Conscrypt.isEngineSupported(clientEngine)) {
                        assertEquals(Boolean.TRUE, clientEngine.getSession().getValue(key));
                    }
                    if (clientSessionReused == SessionReusedState.REUSED) {
                        // If we know for sure it was reused so the accessedTime needs to be larger.
                        assertThat(clientEngine.getSession().getLastAccessedTime())
                                .isGreaterThan(clientEngine.getSession().getCreationTime());
                    } else {
                        assertThat(clientEngine.getSession().getLastAccessedTime())
                                .isGreaterThanOrEqualTo(clientEngine.getSession().getCreationTime());
                    }
                }
            } else {
                // Ensure we sleep 1ms in between as getLastAccessedTime() and getCreationTime() are in milliseconds.
                // If we don't sleep and execution is very fast we will see test-failures once we go into the
                // reuse branch.
                Thread.sleep(1);
                clientEngine.getSession().putValue(key, Boolean.TRUE);
            }
            closeOutboundAndInbound(param.type(), clientEngine, serverEngine);
        } finally {
            cleanupClientSslEngine(clientEngine);
            cleanupServerSslEngine(serverEngine);
        }
    }
protected | TestByteBufAllocator |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/config/provider/AllowedPathsTest.java | {
"start": 1400,
"end": 3990
} | class ____ {
    // Instance under test; re-created in each test with a different allowed.paths value.
    private AllowedPaths allowedPaths;
    // Temp root supplied by JUnit; all fixture directories/files live beneath it.
    @TempDir
    private File parent;
    private String dir;     // fixture directory containing myFile
    private String myFile;  // file inside dir
    private String dir2;    // sibling directory, allowed only in some tests
@BeforeEach
public void setup() throws IOException {
dir = Files.createDirectory(Paths.get(parent.toString(), "dir")).toString();
myFile = Files.createFile(Paths.get(dir, "myFile")).toString();
dir2 = Files.createDirectory(Paths.get(parent.toString(), "dir2")).toString();
}
@Test
public void testAllowedPath() {
allowedPaths = new AllowedPaths(String.join(",", dir, dir2));
Path actual = allowedPaths.parseUntrustedPath(myFile);
assertEquals(myFile, actual.toString());
}
@Test
public void testNotAllowedPath() {
allowedPaths = new AllowedPaths(dir);
Path actual = allowedPaths.parseUntrustedPath(dir2);
assertNull(actual);
}
@Test
public void testNullAllowedPaths() {
allowedPaths = new AllowedPaths(null);
Path actual = allowedPaths.parseUntrustedPath(myFile);
assertEquals(myFile, actual.toString());
}
@Test
public void testNoTraversal() {
allowedPaths = new AllowedPaths(dir);
Path traversedPath = Paths.get(dir, "..", "dir2");
Path actual = allowedPaths.parseUntrustedPath(traversedPath.toString());
assertNull(actual);
}
@Test
public void testAllowedTraversal() {
allowedPaths = new AllowedPaths(String.join(",", dir, dir2));
Path traversedPath = Paths.get(dir, "..", "dir2");
Path actual = allowedPaths.parseUntrustedPath(traversedPath.toString());
assertEquals(traversedPath.normalize(), actual);
}
@Test
public void testNullAllowedPathsTraversal() {
allowedPaths = new AllowedPaths("");
Path traversedPath = Paths.get(dir, "..", "dir2");
Path actual = allowedPaths.parseUntrustedPath(traversedPath.toString());
// we expect non-normalised path if allowed.paths is not specified to avoid backward compatibility
assertEquals(traversedPath, actual);
}
@Test
public void testAllowedPathDoesNotExist() {
Exception e = assertThrows(ConfigException.class, () -> new AllowedPaths("/foo"));
assertEquals("Path /foo does not exist", e.getMessage());
}
@Test
public void testAllowedPathIsNotAbsolute() {
Exception e = assertThrows(ConfigException.class, () -> new AllowedPaths("foo bar "));
assertEquals("Path foo bar is not absolute", e.getMessage());
}
}
| AllowedPathsTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/PreferredInterfaceTypeTest.java | {
"start": 27175,
"end": 27574
} | class ____ {
private static final ImmutableSet<String> FOO;
static {
FOO = ImmutableSet.of();
}
}
""")
.doTest();
}
@Test
public void nonStatic() {
testHelper
.addSourceLines(
"Test.java",
"""
import com.google.common.collect.ImmutableSet;
| Test |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/source/coordinator/SourceCoordinatorProviderTest.java | {
"start": 1847,
"end": 6317
} | class ____ {
    // Fixed operator id shared by all tests in this class.
    private static final OperatorID OPERATOR_ID = new OperatorID(1234L, 5678L);
    // Number of splits served by the MockSource; also passed to MockOperatorCoordinatorContext.
    private static final int NUM_SPLITS = 10;
    // Provider under test; re-created before each test in setup().
    private SourceCoordinatorProvider<MockSourceSplit> provider;
    // Creates a fresh provider for a bounded MockSource with watermark alignment disabled.
    // NOTE(review): assumed the positional "1" is the coordinator thread count - confirm
    // against the SourceCoordinatorProvider constructor.
    @BeforeEach
    void setup() {
        provider =
                new SourceCoordinatorProvider<>(
                        "SourceCoordinatorProviderTest",
                        OPERATOR_ID,
                        new MockSource(Boundedness.BOUNDED, NUM_SPLITS),
                        1,
                        WatermarkAlignmentParams.WATERMARK_ALIGNMENT_DISABLED,
                        null);
    }
@Test
void testCreate() throws Exception {
OperatorCoordinator coordinator =
provider.create(new MockOperatorCoordinatorContext(OPERATOR_ID, NUM_SPLITS));
assertThat(coordinator).isInstanceOf(RecreateOnResetOperatorCoordinator.class);
}
    /**
     * Takes a checkpoint with one registered reader, registers a second reader, then resets to
     * the checkpoint and verifies that (a) a new internal coordinator instance is created and
     * (b) no reader registrations are restored from the checkpoint (FLINK-21452).
     */
    @Test
    void testCheckpointAndReset() throws Exception {
        final OperatorCoordinator.Context context =
                new MockOperatorCoordinatorContext(OPERATOR_ID, NUM_SPLITS);
        final RecreateOnResetOperatorCoordinator coordinator =
                (RecreateOnResetOperatorCoordinator) provider.create(context);
        final SourceCoordinator<?, ?> sourceCoordinator =
                (SourceCoordinator<?, ?>) coordinator.getInternalCoordinator();
        // Start the coordinator.
        coordinator.start();
        // register reader 0 and take a checkpoint.
        coordinator.handleEventFromOperator(0, 0, new ReaderRegistrationEvent(0, "location"));
        CompletableFuture<byte[]> future = new CompletableFuture<>();
        coordinator.checkpointCoordinator(0L, future);
        byte[] bytes = future.get();
        // Register reader 1.
        coordinator.handleEventFromOperator(1, 0, new ReaderRegistrationEvent(1, "location"));
        // Wait until the coordinator context is updated with registration of reader 1.
        // (Registration is applied asynchronously, hence the poll loop.)
        while (sourceCoordinator.getContext().registeredReaders().size() < 2) {
            Thread.sleep(1);
        }
        // reset the coordinator to the checkpoint which only contains reader 0.
        coordinator.resetToCheckpoint(0L, bytes);
        final SourceCoordinator<?, ?> restoredSourceCoordinator =
                (SourceCoordinator<?, ?>) coordinator.getInternalCoordinator();
        assertThat(sourceCoordinator)
                .as("The restored source coordinator should be a different instance")
                .isNotEqualTo(restoredSourceCoordinator);
        // FLINK-21452: do not (re)store registered readers
        assertThat(restoredSourceCoordinator.getContext().registeredReaders())
                .as("There should be no registered reader.")
                .isEmpty();
    }
@Test
void testCallAsyncExceptionFailsJob() throws Exception {
MockOperatorCoordinatorContext context =
new MockOperatorCoordinatorContext(OPERATOR_ID, NUM_SPLITS);
RecreateOnResetOperatorCoordinator coordinator =
(RecreateOnResetOperatorCoordinator) provider.create(context);
SourceCoordinator<?, ?> sourceCoordinator =
(SourceCoordinator<?, ?>) coordinator.getInternalCoordinator();
sourceCoordinator
.getContext()
.callAsync(
() -> null,
(ignored, e) -> {
throw new RuntimeException();
});
CommonTestUtils.waitUtil(
context::isJobFailed,
Duration.ofSeconds(10L),
"The job did not fail before timeout.");
}
@Test
void testCoordinatorExecutorThreadFactoryNewMultipleThread() {
SourceCoordinatorProvider.CoordinatorExecutorThreadFactory
coordinatorExecutorThreadFactory =
new SourceCoordinatorProvider.CoordinatorExecutorThreadFactory(
"test_coordinator_thread",
new MockOperatorCoordinatorContext(
new OperatorID(1234L, 5678L), 3));
coordinatorExecutorThreadFactory.newThread(() -> {});
// coordinatorExecutorThreadFactory cannot create multiple threads.
assertThatThrownBy(() -> coordinatorExecutorThreadFactory.newThread(() -> {}))
.isInstanceOf(IllegalStateException.class);
}
}
| SourceCoordinatorProviderTest |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsInOutSynchronousTest.java | {
"start": 1587,
"end": 3575
} | class ____ extends AbstractJMSTest {
@Order(2)
@RegisterExtension
public static CamelContextExtension camelContextExtension = new DefaultCamelContextExtension();
private static String beforeThreadName;
private static String afterThreadName;
protected CamelContext context;
protected ProducerTemplate template;
protected ConsumerTemplate consumer;
private String reply;
private final String url = "activemq:queue:JmsInOutSynchronousTest?synchronous=true";
@BeforeEach
public void sendMessage() {
reply = template.requestBody("direct:start", "Hello World", String.class);
}
@Test
public void testSynchronous() {
assertEquals("Bye World", reply);
assertTrue(beforeThreadName.equalsIgnoreCase(afterThreadName), "Should use same threads");
}
@Override
protected String getComponentName() {
return "activemq";
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:start")
.to("log:before")
.process(exchange -> beforeThreadName = Thread.currentThread().getName())
.to(url)
.process(exchange -> afterThreadName = Thread.currentThread().getName())
.to("log:after")
.to("mock:result");
from("activemq:queue:JmsInOutSynchronousTest").process(exchange -> exchange.getMessage().setBody("Bye World"));
}
};
}
@Override
public CamelContextExtension getCamelContextExtension() {
return camelContextExtension;
}
@BeforeEach
void setUpRequirements() {
context = camelContextExtension.getContext();
template = camelContextExtension.getProducerTemplate();
consumer = camelContextExtension.getConsumerTemplate();
}
}
| JmsInOutSynchronousTest |
java | google__jimfs | jimfs/src/test/java/com/google/common/jimfs/RegularFileTest.java | {
"start": 1639,
"end": 4128
} | class ____ {
/**
* Returns a test suite for testing file methods with a variety of {@code HeapDisk}
* configurations.
*/
public static TestSuite suite() {
TestSuite suite = new TestSuite();
for (ReuseStrategy reuseStrategy : EnumSet.allOf(ReuseStrategy.class)) {
TestSuite suiteForReuseStrategy = new TestSuite(reuseStrategy.toString());
Set<List<Integer>> sizeOptions =
Sets.cartesianProduct(ImmutableList.of(BLOCK_SIZES, CACHE_SIZES));
for (List<Integer> options : sizeOptions) {
int blockSize = options.get(0);
int cacheSize = options.get(1);
if (cacheSize > 0 && cacheSize < blockSize) {
// skip cases where the cache size is not -1 (all) or 0 (none) but it is < blockSize,
// because this is equivalent to a cache size of 0
continue;
}
TestConfiguration state = new TestConfiguration(blockSize, cacheSize, reuseStrategy);
TestSuite suiteForTest = new TestSuite(state.toString());
for (Method method : TEST_METHODS) {
RegularFileTestRunner tester = new RegularFileTestRunner(method.getName(), state);
suiteForTest.addTest(tester);
}
suiteForReuseStrategy.addTest(suiteForTest);
}
suite.addTest(suiteForReuseStrategy);
}
return suite;
}
public static final ImmutableSet<Integer> BLOCK_SIZES = ImmutableSet.of(2, 8, 128, 8192);
public static final ImmutableSet<Integer> CACHE_SIZES = ImmutableSet.of(0, 4, 16, 128, -1);
private static final ImmutableList<Method> TEST_METHODS =
FluentIterable.from(Arrays.asList(RegularFileTestRunner.class.getDeclaredMethods()))
.filter(
new Predicate<Method>() {
@Override
public boolean apply(Method method) {
return method.getName().startsWith("test")
&& Modifier.isPublic(method.getModifiers())
&& method.getParameterTypes().length == 0;
}
})
.toList();
/**
* Different strategies for handling reuse of disks and/or files between tests, intended to ensure
* that {@link HeapDisk} operates properly in a variety of usage states including newly created,
* having created files that have not been deleted yet, having created files that have been
* deleted, and having created files some of which have been deleted and some of which have not.
*/
public | RegularFileTest |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/configuration/ConfigCompatibility.java | {
"start": 10879,
"end": 25552
} | class ____ implements ConfigSourceInterceptor {
@Serial
private static final long serialVersionUID = 6840768821115677665L;
private static final BackEnd instance = new BackEnd();
private BackEnd() {
}
public ConfigValue getValue(final ConfigSourceInterceptorContext context, final String name) {
NameIterator ni = new NameIterator(name);
BiFunction<ConfigSourceInterceptorContext, NameIterator, ConfigValue> function = newNames.findRootValue(ni);
return function != null ? function.apply(context, ni) : context.proceed(name);
}
public static BackEnd instance() {
return instance;
}
}
// front end mappings here
private static List<String> quarkusPackageType(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
// check the value to see what properties we need to define
ConfigValue legacyPackageType = ctxt.proceed(ni.getName());
if (legacyPackageType == null) {
// nothing to do
return List.of();
}
// override defaults of all of these properties
return List.of("quarkus.package.jar.enabled", "quarkus.package.jar.type", "quarkus.native.enabled",
"quarkus.native.sources-only");
}
private static List<String> quarkusPackageCreateAppcds(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
return List.of("quarkus.package.jar.appcds.enabled");
}
private static List<String> quarkusPackageAppcdsBuilderImage(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
return List.of("quarkus.package.jar.appcds.builder-image");
}
private static List<String> quarkusPackageAppcdsUseContainer(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
return List.of("quarkus.package.jar.appcds.use-container");
}
private static List<String> quarkusPackageCompressJar(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
return List.of("quarkus.package.jar.compress");
}
private static List<String> quarkusFilterOptionalDependencies(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
return List.of("quarkus.package.jar.filter-optional-dependencies");
}
private static List<String> quarkusPackageAddRunnerSuffix(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
return List.of("quarkus.package.jar.add-runner-suffix");
}
private static List<String> quarkusPackageUserConfiguredIgnoredEntries(ConfigSourceInterceptorContext ctxt,
NameIterator ni) {
return List.of("quarkus.package.jar.user-configured-ignored-entries");
}
private static List<String> quarkusPackageIncludeDependencyList(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
return List.of("quarkus.package.jar.include-dependency-list");
}
private static List<String> quarkusPackageUserProvidersDirectory(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
return List.of("quarkus.package.jar.user-providers-directory");
}
private static List<String> quarkusPackageIncludedOptionalDependencies(ConfigSourceInterceptorContext ctxt,
NameIterator ni) {
return List.of("quarkus.package.jar.included-optional-dependencies");
}
private static List<String> quarkusPackageDecompilerVersion(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
// always hide this ignored property
return List.of();
}
private static List<String> quarkusPackageDecompilerEnabled(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
// simple mapping to a new name
return List.of("quarkus.package.jar.decompiler.enabled");
}
private static List<String> quarkusPackageDecompilerJarDirectory(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
// simple mapping to a new name
return List.of("quarkus.package.jar.decompiler.jar-directory");
}
private static List<String> quarkusPackageManifestAttributes(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
// mapping to a new name, copying the last segment
ni.goToEnd();
ni.previous();
return List.of("quarkus.package.jar.manifest.attributes." + ni.getName().substring(ni.getPosition() + 1));
}
private static List<String> quarkusPackageManifestSections(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
// mapping to a new name, copying the last two segments
ni.goToEnd();
ni.previous();
ni.previous();
return List.of("quarkus.package.jar.manifest.sections." + ni.getName().substring(ni.getPosition() + 1));
}
private static List<String> quarkusPackageManifestAddImplementationEntries(ConfigSourceInterceptorContext ctxt,
NameIterator ni) {
// simple mapping to a new name
return List.of("quarkus.package.jar.manifest.add-implementation-entries");
}
// back end mappings here
private static final Set<String> ANY_NATIVE = Set.of("native", "native-sources");
private static ConfigValue quarkusNativeEnabled(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
// GraalVM native image is enabled if the legacy package type is "native" or "native sources"
ConfigValue ptVal = ctxt.restart("quarkus.package.type");
if (ptVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
// map old name to new name
return ptVal.withName(ni.getName()).withValue(
Boolean.toString(ANY_NATIVE.contains(ptVal.getValue())));
}
}
private static ConfigValue quarkusPackageJarEnabled(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
// the JAR packaging type is present if a JAR type was configured in the legacy property
ConfigValue ptVal = ctxt.restart("quarkus.package.type");
if (ptVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
return ptVal.withName(ni.getName()).withValue(
Boolean.toString(!ANY_NATIVE.contains(ptVal.getValue())));
}
}
private static ConfigValue quarkusPackageJarAppcdsEnabled(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.create-appcds");
if (oldVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarAppcdsBuilderImage(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.appcds-builder-image");
if (oldVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarAppcdsUseContainer(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.appcds-use-container");
if (oldVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarType(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
ConfigValue ptVal = ctxt.restart("quarkus.package.type");
if (ptVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
return ANY_NATIVE.contains(ptVal.getValue()) ? ctxt.proceed(ni.getName()) : ptVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarCompress(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.compress-jar");
if (oldVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarFilterOptionalDependencies(ConfigSourceInterceptorContext ctxt,
NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.filter-optional-dependencies");
if (oldVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarAddRunnerSuffix(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.add-runner-suffix");
if (oldVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarUserConfiguredIgnoredEntries(ConfigSourceInterceptorContext ctxt,
NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.user-configured-ignored-entries");
if (oldVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarUserProvidersDirectory(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.user-providers-directory");
if (oldVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarIncludedOptionalDependencies(ConfigSourceInterceptorContext ctxt,
NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.included-optional-dependencies");
if (oldVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarIncludeDependencyList(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.include-dependency-list");
if (oldVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusNativeSourcesOnly(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
// GraalVM native image is enabled if the legacy package type is "native" or "native sources"
ConfigValue ptVal = ctxt.restart("quarkus.package.type");
if (ptVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
// map old name to new name
return ptVal.withName(ni.getName()).withValue(Boolean.toString(ptVal.getValue().equals("native-sources")));
}
}
private static ConfigValue quarkusPackageJarManifestAttributes(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
// mapping from a legacy name, copying the last segment
ni.goToEnd();
ni.previous();
String oldName = "quarkus.package.manifest.attributes." + ni.getName().substring(ni.getPosition() + 1);
ConfigValue oldVal = ctxt.restart(oldName);
if (oldVal == null) {
return ctxt.proceed(ni.getName());
} else {
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarManifestSections(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
// mapping from a legacy name, copying the last two segments
ni.goToEnd();
ni.previous();
ni.previous();
String oldName = "quarkus.package.manifest.sections." + ni.getName().substring(ni.getPosition() + 1);
ConfigValue oldVal = ctxt.restart(oldName);
if (oldVal == null) {
return ctxt.proceed(ni.getName());
} else {
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarManifestAddImplementationEntries(ConfigSourceInterceptorContext ctxt,
NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.manifest.add-implementation-entries");
if (oldVal == null) {
// on to the default value
return ctxt.proceed(ni.getName());
} else {
// map old name to new name
return oldVal.withName(ni.getName());
}
}
private static ConfigValue quarkusPackageJarDecompilerEnabled(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.decompiler.enabled");
if (oldVal == null) {
return ctxt.proceed(ni.getName());
}
// map old name to new name
return oldVal.withName(ni.getName());
}
private static ConfigValue quarkusPackageJarDecompilerJarDirectory(ConfigSourceInterceptorContext ctxt, NameIterator ni) {
ConfigValue oldVal = ctxt.restart("quarkus.package.decompiler.jar-directory");
if (oldVal == null) {
return ctxt.proceed(ni.getName());
}
// map old name to new name
return oldVal.withName(ni.getName());
}
// utilities
@SafeVarargs
private static <T> KeyMap<T> keyMap(
Map.Entry<List<String>, T>... entries) {
KeyMap<T> keyMap = new KeyMap<>();
KeyMap<T> subMap;
for (Map.Entry<List<String>, T> entry : entries) {
subMap = keyMap;
for (String part : entry.getKey()) {
if (part.equals("*")) {
subMap = subMap.getOrCreateAny();
} else {
KeyMap<T> tryMap = subMap.get(part);
if (tryMap == null) {
tryMap = new KeyMap<>();
subMap.put(part, tryMap);
}
subMap = tryMap;
}
}
subMap.putRootValue(entry.getValue());
}
return keyMap;
}
}
| BackEnd |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/env/StandardEnvironment.java | {
"start": 2470,
"end": 4206
} | class ____ extends AbstractEnvironment {
/** System environment property source name: {@value}. */
public static final String SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME = "systemEnvironment";
/** JVM system properties property source name: {@value}. */
public static final String SYSTEM_PROPERTIES_PROPERTY_SOURCE_NAME = "systemProperties";
/**
* Create a new {@code StandardEnvironment} instance with a default
* {@link MutablePropertySources} instance.
*/
public StandardEnvironment() {
}
/**
* Create a new {@code StandardEnvironment} instance with a specific
* {@link MutablePropertySources} instance.
* @param propertySources property sources to use
* @since 5.3.4
*/
protected StandardEnvironment(MutablePropertySources propertySources) {
super(propertySources);
}
/**
* Customize the set of property sources with those appropriate for any standard
* Java environment:
* <ul>
* <li>{@value #SYSTEM_PROPERTIES_PROPERTY_SOURCE_NAME}
* <li>{@value #SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME}
* </ul>
* <p>Properties present in {@value #SYSTEM_PROPERTIES_PROPERTY_SOURCE_NAME} will
* take precedence over those in {@value #SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME}.
* @see AbstractEnvironment#customizePropertySources(MutablePropertySources)
* @see #getSystemProperties()
* @see #getSystemEnvironment()
*/
@Override
protected void customizePropertySources(MutablePropertySources propertySources) {
propertySources.addLast(
new PropertiesPropertySource(SYSTEM_PROPERTIES_PROPERTY_SOURCE_NAME, getSystemProperties()));
propertySources.addLast(
new SystemEnvironmentPropertySource(SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME, getSystemEnvironment()));
}
}
| StandardEnvironment |
java | apache__flink | flink-python/src/main/java/org/apache/flink/formats/avro/AvroRowSerializationSchema.java | {
"start": 3898,
"end": 15929
} | class ____ not be null.");
this.recordClazz = recordClazz;
this.schema = SpecificData.get().getSchema(recordClazz);
this.schemaString = schema.toString();
this.datumWriter = new SpecificDatumWriter<>(schema);
this.arrayOutputStream = new ByteArrayOutputStream();
this.encoder = EncoderFactory.get().binaryEncoder(arrayOutputStream, null);
}
/**
* Creates an Avro serialization schema for the given Avro schema string.
*
* @param avroSchemaString Avro schema string used to serialize Flink's row to Avro's record
*/
public AvroRowSerializationSchema(String avroSchemaString) {
Preconditions.checkNotNull(avroSchemaString, "Avro schema must not be null.");
this.recordClazz = null;
this.schemaString = avroSchemaString;
try {
this.schema = new Schema.Parser().parse(avroSchemaString);
} catch (SchemaParseException e) {
throw new IllegalArgumentException("Could not parse Avro schema string.", e);
}
this.datumWriter = new GenericDatumWriter<>(schema);
this.arrayOutputStream = new ByteArrayOutputStream();
this.encoder = EncoderFactory.get().binaryEncoder(arrayOutputStream, null);
}
@Override
public byte[] serialize(Row row) {
return serialize(row, true);
}
@VisibleForTesting
byte[] serialize(Row row, boolean legacyTimestampMapping) {
try {
// convert to record
final GenericRecord record =
convertRowToAvroRecord(schema, row, legacyTimestampMapping);
arrayOutputStream.reset();
datumWriter.write(record, encoder);
encoder.flush();
return arrayOutputStream.toByteArray();
} catch (Exception e) {
throw new RuntimeException("Failed to serialize row.", e);
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final AvroRowSerializationSchema that = (AvroRowSerializationSchema) o;
return Objects.equals(recordClazz, that.recordClazz)
&& Objects.equals(schemaString, that.schemaString);
}
@Override
public int hashCode() {
return Objects.hash(recordClazz, schemaString);
}
// --------------------------------------------------------------------------------------------
private GenericRecord convertRowToAvroRecord(
Schema schema, Row row, boolean legacyTimestampMapping) {
final List<Schema.Field> fields = schema.getFields();
final int length = fields.size();
final GenericRecord record = new GenericData.Record(schema);
for (int i = 0; i < length; i++) {
final Schema.Field field = fields.get(i);
record.put(
i, convertFlinkType(field.schema(), row.getField(i), legacyTimestampMapping));
}
return record;
}
private Object convertFlinkType(Schema schema, Object object, boolean legacyTimestampMapping) {
if (object == null) {
return null;
}
switch (schema.getType()) {
case RECORD:
if (object instanceof Row) {
return convertRowToAvroRecord(schema, (Row) object, legacyTimestampMapping);
}
throw new IllegalStateException("Row expected but was: " + object.getClass());
case ENUM:
return new GenericData.EnumSymbol(schema, object.toString());
case ARRAY:
final Schema elementSchema = schema.getElementType();
final Object[] array = (Object[]) object;
final GenericData.Array<Object> convertedArray =
new GenericData.Array<>(array.length, schema);
for (Object element : array) {
convertedArray.add(
convertFlinkType(elementSchema, element, legacyTimestampMapping));
}
return convertedArray;
case MAP:
final Map<?, ?> map = (Map<?, ?>) object;
final Map<Utf8, Object> convertedMap = new HashMap<>();
for (Map.Entry<?, ?> entry : map.entrySet()) {
convertedMap.put(
new Utf8(entry.getKey().toString()),
convertFlinkType(
schema.getValueType(),
entry.getValue(),
legacyTimestampMapping));
}
return convertedMap;
case UNION:
final List<Schema> types = schema.getTypes();
final int size = types.size();
final Schema actualSchema;
if (size == 2 && types.get(0).getType() == Schema.Type.NULL) {
actualSchema = types.get(1);
} else if (size == 2 && types.get(1).getType() == Schema.Type.NULL) {
actualSchema = types.get(0);
} else if (size == 1) {
actualSchema = types.get(0);
} else {
// generic type
return object;
}
return convertFlinkType(actualSchema, object, legacyTimestampMapping);
case FIXED:
// check for logical type
if (object instanceof BigDecimal) {
return new GenericData.Fixed(
schema, convertFromDecimal(schema, (BigDecimal) object));
}
return new GenericData.Fixed(schema, (byte[]) object);
case STRING:
return new Utf8(object.toString());
case BYTES:
// check for logical type
if (object instanceof BigDecimal) {
return ByteBuffer.wrap(convertFromDecimal(schema, (BigDecimal) object));
}
return ByteBuffer.wrap((byte[]) object);
case INT:
// check for logical types
if (object instanceof Date) {
return convertFromDate(schema, (Date) object);
} else if (object instanceof LocalDate) {
return convertFromDate(schema, Date.valueOf((LocalDate) object));
} else if (object instanceof Time) {
return convertFromTimeMillis(schema, (Time) object);
} else if (object instanceof LocalTime) {
return convertFromTimeMillis(schema, Time.valueOf((LocalTime) object));
}
return object;
case LONG:
// check for logical type
if (object instanceof Timestamp) {
return convertFromTimestamp(schema, (Timestamp) object, legacyTimestampMapping);
} else if (object instanceof LocalDateTime) {
return convertFromTimestamp(
schema,
Timestamp.valueOf((LocalDateTime) object),
legacyTimestampMapping);
} else if (object instanceof Time) {
return convertFromTimeMicros(schema, (Time) object);
}
return object;
case FLOAT:
case DOUBLE:
case BOOLEAN:
return object;
}
throw new RuntimeException("Unsupported Avro type:" + schema);
}
private byte[] convertFromDecimal(Schema schema, BigDecimal decimal) {
final LogicalType logicalType = schema.getLogicalType();
if (logicalType instanceof LogicalTypes.Decimal) {
final LogicalTypes.Decimal decimalType = (LogicalTypes.Decimal) logicalType;
// rescale to target type
final BigDecimal rescaled =
decimal.setScale(decimalType.getScale(), BigDecimal.ROUND_UNNECESSARY);
// byte array must contain the two's-complement representation of the
// unscaled integer value in big-endian byte order
return decimal.unscaledValue().toByteArray();
} else {
throw new RuntimeException("Unsupported decimal type.");
}
}
private int convertFromDate(Schema schema, Date date) {
final LogicalType logicalType = schema.getLogicalType();
if (logicalType == LogicalTypes.date()) {
// adopted from Apache Calcite
final long converted = toEpochMillis(date);
return (int) (converted / 86400000L);
} else {
throw new RuntimeException("Unsupported date type.");
}
}
private int convertFromTimeMillis(Schema schema, Time date) {
final LogicalType logicalType = schema.getLogicalType();
if (logicalType == LogicalTypes.timeMillis()) {
// adopted from Apache Calcite
final long converted = toEpochMillis(date);
return (int) (converted % 86400000L);
} else {
throw new RuntimeException("Unsupported time type.");
}
}
private long convertFromTimeMicros(Schema schema, Time date) {
final LogicalType logicalType = schema.getLogicalType();
if (logicalType == LogicalTypes.timeMicros()) {
// adopted from Apache Calcite
final long converted = toEpochMillis(date);
return (converted % 86400000L) * 1000L;
} else {
throw new RuntimeException("Unsupported time type.");
}
}
private long convertFromTimestamp(
Schema schema, Timestamp date, boolean legacyTimestampMapping) {
final LogicalType logicalType = schema.getLogicalType();
if (legacyTimestampMapping
&& (logicalType == LogicalTypes.localTimestampMillis()
|| logicalType == LogicalTypes.localTimestampMicros())) {
throw new RuntimeException("Unsupported local timestamp type.");
}
if (logicalType == LogicalTypes.timestampMillis()
|| logicalType == LogicalTypes.localTimestampMillis()) {
// adopted from Apache Calcite
final long time = date.getTime();
return time + (long) LOCAL_TZ.getOffset(time);
} else if (logicalType == LogicalTypes.timestampMicros()
|| logicalType == LogicalTypes.localTimestampMicros()) {
long millis = date.getTime();
long micros = millis * 1000 + (date.getNanos() % 1_000_000 / 1000);
long offset = LOCAL_TZ.getOffset(millis) * 1000L;
return micros + offset;
} else {
throw new RuntimeException("Unsupported timestamp type.");
}
}
private long toEpochMillis(java.util.Date date) {
final long time = date.getTime();
return time + (long) LOCAL_TZ.getOffset(time);
}
private void writeObject(ObjectOutputStream outputStream) throws IOException {
outputStream.writeObject(recordClazz);
outputStream.writeObject(schemaString); // support for null
}
@SuppressWarnings("unchecked")
private void readObject(ObjectInputStream inputStream)
throws ClassNotFoundException, IOException {
recordClazz = (Class<? extends SpecificRecord>) inputStream.readObject();
schemaString = (String) inputStream.readObject();
if (recordClazz != null) {
schema = SpecificData.get().getSchema(recordClazz);
} else {
schema = new Schema.Parser().parse(schemaString);
}
datumWriter = new SpecificDatumWriter<>(schema);
arrayOutputStream = new ByteArrayOutputStream();
encoder = EncoderFactory.get().binaryEncoder(arrayOutputStream, null);
}
}
| must |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit4/rules/ParameterizedSpringRuleTests.java | {
"start": 1921,
"end": 3359
} | class ____ {
private static final AtomicInteger invocationCount = new AtomicInteger();
@ClassRule
public static final SpringClassRule springClassRule = new SpringClassRule();
@Rule
public final SpringMethodRule springMethodRule = new SpringMethodRule();
@Autowired
private ApplicationContext applicationContext;
@Autowired
private Pet pet;
@Parameter(0)
public String employeeBeanName;
@Parameter(1)
public String employeeName;
@Parameters(name = "bean [{0}], employee [{1}]")
public static String[][] employeeData() {
return new String[][] { { "employee1", "John Smith" }, { "employee2", "Jane Smith" } };
}
@BeforeClass
public static void BeforeClass() {
invocationCount.set(0);
}
@Test
public final void verifyPetAndEmployee() {
invocationCount.incrementAndGet();
// Verifying dependency injection:
assertThat(this.pet).as("The pet field should have been autowired.").isNotNull();
// Verifying 'parameterized' support:
Employee employee = this.applicationContext.getBean(this.employeeBeanName, Employee.class);
assertThat(employee.getName()).as("Name of the employee configured as bean [" + this.employeeBeanName + "].").isEqualTo(this.employeeName);
}
@AfterClass
public static void verifyNumParameterizedRuns() {
assertThat(invocationCount.get()).as("Number of times the parameterized test method was executed.").isEqualTo(employeeData().length);
}
}
| ParameterizedSpringRuleTests |
java | quarkusio__quarkus | extensions/arc/deployment/src/main/java/io/quarkus/arc/deployment/devui/Link.java | {
"start": 76,
"end": 1431
} | class ____ {
static Link dependency(String source, String target, int level) {
return new Link(source, target, "dependency", level);
}
static Link lookup(String source, String target, int level) {
return new Link(source, target, "lookup", level);
}
static Link builtin(String source, String target, int level) {
return new Link(source, target, "builtin", level);
}
static Link producer(String source, String target, int level) {
return new Link(source, target, "producer", level);
}
public final String source;
public final String target;
public final String type;
public final int level;
public Link(String source, String target, String type, int level) {
this.source = source;
this.target = target;
this.type = type;
this.level = level;
}
@Override
public int hashCode() {
return Objects.hash(source, target);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Link other = (Link) obj;
return Objects.equals(source, other.source) && Objects.equals(target, other.target);
}
}
| Link |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.