language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/calcite/RelTimeIndicatorConverter.java
|
{
"start": 5832,
"end": 35584
}
|
class ____ extends RelHomogeneousShuttle {
private final RexBuilder rexBuilder;
private RelTimeIndicatorConverter(RexBuilder rexBuilder) {
this.rexBuilder = rexBuilder;
}
public static RelNode convert(
RelNode rootRel, RexBuilder rexBuilder, boolean needFinalTimeIndicatorConversion) {
RelTimeIndicatorConverter converter = new RelTimeIndicatorConverter(rexBuilder);
RelNode convertedRoot = rootRel.accept(converter);
// FlinkLogicalLegacySink and FlinkLogicalSink are already converted
if (rootRel instanceof FlinkLogicalLegacySink
|| rootRel instanceof FlinkLogicalSink
|| !needFinalTimeIndicatorConversion) {
return convertedRoot;
}
// materialize remaining procTime indicators
return converter.materializeProcTime(convertedRoot);
}
@Override
public RelNode visit(RelNode node) {
if (node instanceof FlinkLogicalValues || node instanceof TableScan) {
return node;
} else if (node instanceof FlinkLogicalIntersect
|| node instanceof FlinkLogicalUnion
|| node instanceof FlinkLogicalMinus) {
return visitSetOp((SetOp) node);
} else if (node instanceof FlinkLogicalTableFunctionScan
|| node instanceof FlinkLogicalSnapshot
|| node instanceof FlinkLogicalRank
|| node instanceof FlinkLogicalDistribution
|| node instanceof FlinkLogicalWatermarkAssigner
|| node instanceof FlinkLogicalSort
|| node instanceof FlinkLogicalOverAggregate
|| node instanceof FlinkLogicalExpand
|| node instanceof FlinkLogicalScriptTransform) {
return visitSimpleRel(node);
} else if (node instanceof FlinkLogicalWindowAggregate) {
return visitWindowAggregate((FlinkLogicalWindowAggregate) node);
} else if (node instanceof FlinkLogicalWindowTableAggregate) {
return visitWindowTableAggregate((FlinkLogicalWindowTableAggregate) node);
} else if (node instanceof FlinkLogicalAggregate) {
return visitAggregate((FlinkLogicalAggregate) node);
} else if (node instanceof FlinkLogicalTableAggregate) {
return visitTableAggregate((FlinkLogicalTableAggregate) node);
} else if (node instanceof FlinkLogicalMatch) {
return visitMatch((FlinkLogicalMatch) node);
} else if (node instanceof FlinkLogicalCalc) {
return visitCalc((FlinkLogicalCalc) node);
} else if (node instanceof FlinkLogicalCorrelate) {
return visitCorrelate((FlinkLogicalCorrelate) node);
} else if (node instanceof FlinkLogicalJoin) {
return visitJoin((FlinkLogicalJoin) node);
} else if (node instanceof FlinkLogicalMultiJoin) {
return visitMultiJoin((FlinkLogicalMultiJoin) node);
} else if (node instanceof FlinkLogicalSink) {
return visitSink((FlinkLogicalSink) node);
} else if (node instanceof FlinkLogicalLegacySink) {
return visitSink((FlinkLogicalLegacySink) node);
} else {
return visitInvalidRel(node);
}
}
@Override
public RelNode visit(LogicalCalc calc) {
return visitInvalidRel(calc);
}
@Override
public RelNode visit(LogicalTableModify modify) {
return visitInvalidRel(modify);
}
private RelNode visitMatch(FlinkLogicalMatch match) {
RelNode newInput = match.getInput().accept(this);
RexTimeIndicatorMaterializer materializer = new RexTimeIndicatorMaterializer(newInput);
Function<Map<String, RexNode>, Map<String, RexNode>> materializeExprs =
rexNodesMap ->
rexNodesMap.entrySet().stream()
.collect(
Collectors.toMap(
Map.Entry::getKey,
e -> e.getValue().accept(materializer),
(e1, e2) -> e1,
LinkedHashMap::new));
// update input expressions
Map<String, RexNode> newPatternDefs = materializeExprs.apply(match.getPatternDefinitions());
Map<String, RexNode> newMeasures = materializeExprs.apply(match.getMeasures());
RexNode newInterval = null;
if (match.getInterval() != null) {
newInterval = match.getInterval().accept(materializer);
}
Predicate<String> isNoLongerTimeIndicator =
fieldName -> {
RexNode newMeasure = newMeasures.get(fieldName);
if (newMeasure == null) {
return false;
} else {
return !isTimeIndicatorType(newMeasure.getType());
}
};
// materialize all output types
RelDataType newOutputType =
getRowTypeWithoutTimeIndicator(match.getRowType(), isNoLongerTimeIndicator);
return new FlinkLogicalMatch(
match.getCluster(),
match.getTraitSet(),
newInput,
newOutputType,
match.getPattern(),
match.isStrictStart(),
match.isStrictEnd(),
newPatternDefs,
newMeasures,
match.getAfter(),
match.getSubsets(),
match.isAllRows(),
match.getPartitionKeys(),
match.getOrderKeys(),
newInterval);
}
private RelNode visitCalc(FlinkLogicalCalc calc) {
// visit children and update inputs
RelNode newInput = calc.getInput().accept(this);
RexProgram program = calc.getProgram();
// check if input field contains time indicator type
// materialize field if no time indicator is present anymore
// if input field is already materialized, change to timestamp type
RexTimeIndicatorMaterializer materializer = new RexTimeIndicatorMaterializer(newInput);
List<RexNode> newProjects =
program.getProjectList().stream()
.map(project -> program.expandLocalRef(project).accept(materializer))
.collect(Collectors.toList());
// materialize condition due to filter will validate condition type
RexNode newCondition = null;
if (program.getCondition() != null) {
newCondition = program.expandLocalRef(program.getCondition()).accept(materializer);
}
RexProgram newProgram =
RexProgram.create(
newInput.getRowType(),
newProjects,
newCondition,
program.getOutputRowType().getFieldNames(),
rexBuilder);
return calc.copy(calc.getTraitSet(), newInput, newProgram);
}
private RelNode visitJoin(FlinkLogicalJoin join) {
RelNode newLeft = join.getLeft().accept(this);
RelNode newRight = join.getRight().accept(this);
int leftFieldCount = newLeft.getRowType().getFieldCount();
// temporal table join
if (TemporalJoinUtil.satisfyTemporalJoin(join, newLeft, newRight)) {
RelNode rewrittenTemporalJoin =
join.copy(
join.getTraitSet(),
join.getCondition(),
newLeft,
newRight,
join.getJoinType(),
join.isSemiJoinDone());
// Materialize all of the time attributes from the right side of temporal join
Set<Integer> rightIndices =
IntStream.range(0, newRight.getRowType().getFieldCount())
.mapToObj(startIdx -> leftFieldCount + startIdx)
.collect(Collectors.toSet());
return createCalcToMaterializeTimeIndicators(rewrittenTemporalJoin, rightIndices);
} else {
if (JoinUtil.satisfyRegularJoin(join, newLeft, newRight)) {
// materialize time attribute fields of regular join's inputs
newLeft = materializeTimeIndicators(newLeft);
newRight = materializeTimeIndicators(newRight);
}
List<RelDataTypeField> leftRightFields = new ArrayList<>();
leftRightFields.addAll(newLeft.getRowType().getFieldList());
leftRightFields.addAll(newRight.getRowType().getFieldList());
RexNode newCondition =
join.getCondition()
.accept(
new RexShuttle() {
@Override
public RexNode visitInputRef(RexInputRef inputRef) {
if (isTimeIndicatorType(inputRef.getType())) {
return RexInputRef.of(
inputRef.getIndex(), leftRightFields);
} else {
return super.visitInputRef(inputRef);
}
}
});
return FlinkLogicalJoin.create(
newLeft, newRight, newCondition, join.getHints(), join.getJoinType());
}
}
private RelNode visitCorrelate(FlinkLogicalCorrelate correlate) {
// visit children and update inputs
RelNode newLeft = correlate.getLeft().accept(this);
RelNode newRight = correlate.getRight().accept(this);
if (newRight instanceof FlinkLogicalTableFunctionScan) {
FlinkLogicalTableFunctionScan newScan = (FlinkLogicalTableFunctionScan) newRight;
List<RelNode> newScanInputs =
newScan.getInputs().stream()
.map(input -> input.accept(this))
.collect(Collectors.toList());
// check if input field contains time indicator type
// materialize field if no time indicator is present anymore
// if input field is already materialized, change to timestamp type
RexTimeIndicatorMaterializer materializer = new RexTimeIndicatorMaterializer(newLeft);
RexNode newScanCall = newScan.getCall().accept(materializer);
newRight =
newScan.copy(
newScan.getTraitSet(),
newScanInputs,
newScanCall,
newScan.getElementType(),
newScan.getRowType(),
newScan.getColumnMappings());
}
return FlinkLogicalCorrelate.create(
newLeft,
newRight,
correlate.getCorrelationId(),
correlate.getRequiredColumns(),
correlate.getJoinType());
}
private RelNode visitSimpleRel(RelNode node) {
List<RelNode> newInputs =
node.getInputs().stream()
.map(input -> input.accept(this))
.collect(Collectors.toList());
return node.copy(node.getTraitSet(), newInputs);
}
private RelNode visitSetOp(SetOp setOp) {
RelNode convertedSetOp = visitSimpleRel(setOp);
// make sure that time indicator types match
List<RelDataTypeField> headInputFields =
convertedSetOp.getInputs().get(0).getRowType().getFieldList();
int fieldCnt = headInputFields.size();
for (int inputIdx = 1; inputIdx < convertedSetOp.getInputs().size(); inputIdx++) {
List<RelDataTypeField> currentInputFields =
convertedSetOp.getInputs().get(inputIdx).getRowType().getFieldList();
for (int fieldIdx = 0; fieldIdx < fieldCnt; fieldIdx++) {
RelDataType headFieldType = headInputFields.get(fieldIdx).getType();
RelDataType currentInputFieldType = currentInputFields.get(fieldIdx).getType();
validateType(currentInputFieldType, headFieldType);
}
}
return convertedSetOp;
}
private RelNode visitSink(SingleRel sink) {
Preconditions.checkArgument(
sink instanceof FlinkLogicalLegacySink || sink instanceof FlinkLogicalSink);
RelNode newInput = sink.getInput().accept(this);
newInput = materializeProcTime(newInput);
return sink.copy(sink.getTraitSet(), Collections.singletonList(newInput));
}
private FlinkLogicalAggregate visitAggregate(FlinkLogicalAggregate agg) {
RelNode newInput = convertAggInput(agg);
List<AggregateCall> updatedAggCalls = convertAggregateCalls(agg);
return (FlinkLogicalAggregate)
agg.copy(
agg.getTraitSet(),
newInput,
agg.getGroupSet(),
agg.getGroupSets(),
updatedAggCalls);
}
private RelNode convertAggInput(Aggregate agg) {
RelNode newInput = agg.getInput().accept(this);
// materialize aggregation arguments/grouping keys
Set<Integer> timeIndicatorIndices = gatherIndicesToMaterialize(agg, newInput);
return materializeTimeIndicators(newInput, timeIndicatorIndices);
}
private Set<Integer> gatherIndicesToMaterialize(Aggregate agg, RelNode newInput) {
List<RelDataType> inputFieldTypes = RelOptUtil.getFieldTypeList(newInput.getRowType());
Predicate<Integer> isTimeIndicator = idx -> isTimeIndicatorType(inputFieldTypes.get(idx));
// add arguments of agg calls
Set<Integer> aggCallArgs =
agg.getAggCallList().stream()
.map(AggregateCall::getArgList)
.flatMap(List::stream)
.filter(isTimeIndicator)
.collect(Collectors.toSet());
FlinkRelMetadataQuery fmq =
FlinkRelMetadataQuery.reuseOrCreate(agg.getCluster().getMetadataQuery());
RelWindowProperties windowProps = fmq.getRelWindowProperties(newInput);
// add grouping sets
Set<Integer> groupSets =
agg.getGroupSets().stream()
.map(
grouping -> {
if (windowProps != null
&& groupingContainsWindowStartEnd(
grouping, windowProps)) {
// for window aggregate we should reserve the time attribute
// of window_time column
return grouping.except(windowProps.getWindowTimeColumns());
} else {
return grouping;
}
})
.flatMap(set -> set.asList().stream())
.filter(isTimeIndicator)
.collect(Collectors.toSet());
Set<Integer> timeIndicatorIndices = new HashSet<>(aggCallArgs);
timeIndicatorIndices.addAll(groupSets);
return timeIndicatorIndices;
}
private List<AggregateCall> convertAggregateCalls(Aggregate agg) {
// remove time indicator type as agg call return type
return agg.getAggCallList().stream()
.map(
call -> {
if (isTimeIndicatorType(call.getType())) {
RelDataType callType =
timestamp(
call.getType().isNullable(),
isTimestampLtzType(call.getType()));
return AggregateCall.create(
call.getAggregation(),
call.isDistinct(),
false,
false,
call.rexList,
call.getArgList(),
call.filterArg,
null,
RelCollations.EMPTY,
callType,
call.name);
} else {
return call;
}
})
.collect(Collectors.toList());
}
private RelNode visitTableAggregate(FlinkLogicalTableAggregate tableAgg) {
FlinkLogicalAggregate correspondingAgg =
FlinkLogicalAggregate.create(
tableAgg.getInput(),
tableAgg.getGroupSet(),
tableAgg.getGroupSets(),
tableAgg.getAggCallList(),
Collections.emptyList());
FlinkLogicalAggregate convertedAgg = visitAggregate(correspondingAgg);
return new FlinkLogicalTableAggregate(
tableAgg.getCluster(),
tableAgg.getTraitSet(),
convertedAgg.getInput(),
convertedAgg.getGroupSet(),
convertedAgg.getGroupSets(),
convertedAgg.getAggCallList());
}
private FlinkLogicalWindowAggregate visitWindowAggregate(FlinkLogicalWindowAggregate agg) {
RelNode newInput = convertAggInput(agg);
List<AggregateCall> updatedAggCalls = convertAggregateCalls(agg);
return new FlinkLogicalWindowAggregate(
agg.getCluster(),
agg.getTraitSet(),
newInput,
agg.getGroupSet(),
updatedAggCalls,
agg.getWindow(),
agg.getNamedProperties());
}
private RelNode visitWindowTableAggregate(FlinkLogicalWindowTableAggregate tableAgg) {
FlinkLogicalWindowAggregate correspondingAgg =
new FlinkLogicalWindowAggregate(
tableAgg.getCluster(),
tableAgg.getTraitSet(),
tableAgg.getInput(),
tableAgg.getGroupSet(),
tableAgg.getAggCallList(),
tableAgg.getWindow(),
tableAgg.getNamedProperties());
FlinkLogicalWindowAggregate convertedWindowAgg = visitWindowAggregate(correspondingAgg);
return new FlinkLogicalWindowTableAggregate(
tableAgg.getCluster(),
tableAgg.getTraitSet(),
convertedWindowAgg.getInput(),
tableAgg.getGroupSet(),
tableAgg.getGroupSets(),
convertedWindowAgg.getAggCallList(),
tableAgg.getWindow(),
tableAgg.getNamedProperties());
}
private RelNode visitMultiJoin(FlinkLogicalMultiJoin multiJoin) {
// visit and materialize children
final List<RelNode> newInputs =
multiJoin.getInputs().stream()
.map(input -> input.accept(this))
.map(this::materializeTimeIndicators)
.collect(Collectors.toList());
final List<RelDataType> allFields =
newInputs.stream()
.flatMap(input -> RelOptUtil.getFieldTypeList(input.getRowType()).stream())
.collect(Collectors.toList());
RexTimeIndicatorMaterializer materializer = new RexTimeIndicatorMaterializer(allFields);
final RexNode newJoinFilter = multiJoin.getJoinFilter().accept(materializer);
final List<RexNode> newJoinConditions =
multiJoin.getJoinConditions().stream()
.map(cond -> cond == null ? null : cond.accept(materializer))
.collect(Collectors.toList());
final RexNode newPostJoinFilter =
multiJoin.getPostJoinFilter() == null
? null
: multiJoin.getPostJoinFilter().accept(materializer);
// materialize all output types and remove special time indicator types
RelDataType newOutputType = getRowTypeWithoutTimeIndicator(multiJoin.getRowType());
return FlinkLogicalMultiJoin.create(
multiJoin.getCluster(),
newInputs,
newJoinFilter,
newOutputType,
newJoinConditions,
multiJoin.getJoinTypes(),
newPostJoinFilter,
multiJoin.getHints());
}
private RelNode visitInvalidRel(RelNode node) {
throw new TableException(
String.format(
"This is a bug and should not happen. Please file an issue. Unknown node %s.",
node.getRelTypeName()));
}
// ----------------------------------------------------------------------------------------
// Utility
// ----------------------------------------------------------------------------------------
private RelNode materializeProcTime(RelNode node) {
// there is no need to add a redundant calc to materialize proc-time if input is empty
// values. Otherwise we need add a PruneEmptyRules after the RelTimeIndicatorConverter to
// remove the redundant calc.
if (node instanceof FlinkLogicalValues
&& FlinkLogicalValues.isEmpty((FlinkLogicalValues) node)) {
return node;
}
Set<Integer> procTimeFieldIndices = gatherProcTimeIndices(node);
return materializeTimeIndicators(node, procTimeFieldIndices);
}
private RelNode materializeTimeIndicators(RelNode node) {
Set<Integer> timeFieldIndices = gatherTimeAttributeIndices(node);
return materializeTimeIndicators(node, timeFieldIndices);
}
private RelNode materializeTimeIndicators(RelNode node, Set<Integer> timeIndicatorIndices) {
if (timeIndicatorIndices.isEmpty()) {
return node;
}
// insert or merge with input calc if
// a time attribute is accessed and needs to be materialized
if (node instanceof FlinkLogicalCalc) {
// merge original calc
return mergeCalcToMaterializeTimeIndicators(
(FlinkLogicalCalc) node, timeIndicatorIndices);
} else {
return createCalcToMaterializeTimeIndicators(node, timeIndicatorIndices);
}
}
private RelNode mergeCalcToMaterializeTimeIndicators(
FlinkLogicalCalc calc, Set<Integer> refIndices) {
RexProgram program = calc.getProgram();
RexProgramBuilder newProgramBuilder =
new RexProgramBuilder(program.getInputRowType(), rexBuilder);
for (int idx = 0; idx < program.getNamedProjects().size(); idx++) {
Pair<RexLocalRef, String> pair = program.getNamedProjects().get(idx);
RexNode project = program.expandLocalRef(pair.left);
if (refIndices.contains(idx)) {
project = materializeTimeIndicators(project);
}
newProgramBuilder.addProject(project, pair.right);
}
if (program.getCondition() != null) {
newProgramBuilder.addCondition(program.expandLocalRef(program.getCondition()));
}
RexProgram newProgram = newProgramBuilder.getProgram();
return FlinkLogicalCalc.create(calc.getInput(), newProgram);
}
private RelNode createCalcToMaterializeTimeIndicators(RelNode input, Set<Integer> refIndices) {
// create new calc
List<RexNode> projects =
input.getRowType().getFieldList().stream()
.map(
field -> {
RexNode project =
new RexInputRef(field.getIndex(), field.getType());
if (refIndices.contains(field.getIndex())) {
project = materializeTimeIndicators(project);
}
return project;
})
.collect(Collectors.toList());
RexProgram newProgram =
RexProgram.create(
input.getRowType(),
projects,
null,
input.getRowType().getFieldNames(),
rexBuilder);
return FlinkLogicalCalc.create(input, newProgram);
}
private RexNode materializeTimeIndicators(RexNode expr) {
if (isRowtimeIndicatorType(expr.getType())) {
// cast rowTime indicator to regular timestamp
return rexBuilder.makeAbstractCast(
timestamp(expr.getType().isNullable(), isTimestampLtzType(expr.getType())),
expr);
} else if (isProctimeIndicatorType(expr.getType())) {
// generate procTime access
return rexBuilder.makeCall(FlinkSqlOperatorTable.PROCTIME_MATERIALIZE, expr);
} else {
return expr;
}
}
private void validateType(RelDataType l, RelDataType r) {
boolean isValid;
// check if time indicators match
if (isTimeIndicatorType(l) && isTimeIndicatorType(r)) {
boolean leftIsEventTime = ((TimeIndicatorRelDataType) l).isEventTime();
boolean rightIsEventTime = ((TimeIndicatorRelDataType) r).isEventTime();
isValid = leftIsEventTime == rightIsEventTime;
} else {
isValid = !isTimeIndicatorType(l) && !isTimeIndicatorType(r);
}
if (!isValid) {
throw new ValidationException(
String.format(
"Union fields with time attributes requires same types, but the types are %s and %s.",
l, r));
}
}
private RelDataType getRowTypeWithoutTimeIndicator(RelDataType relType) {
return getRowTypeWithoutTimeIndicator(relType, s -> true);
}
private RelDataType getRowTypeWithoutTimeIndicator(
RelDataType relType, Predicate<String> shouldMaterialize) {
Map<String, RelDataType> convertedFields =
relType.getFieldList().stream()
.map(
field -> {
RelDataType fieldType = field.getType();
if (isTimeIndicatorType(fieldType)
&& shouldMaterialize.test(field.getName())) {
fieldType =
timestamp(
fieldType.isNullable(),
isTimestampLtzType(fieldType));
}
return Tuple2.of(field.getName(), fieldType);
})
.collect(
Collectors.toMap(
t -> t.f0, t -> t.f1, (e1, e2) -> e1, LinkedHashMap::new));
return rexBuilder.getTypeFactory().builder().addAll(convertedFields.entrySet()).build();
}
private Set<Integer> gatherProcTimeIndices(RelNode node) {
return gatherTimeAttributeIndices(node, f -> isProctimeIndicatorType(f.getType()));
}
private Set<Integer> gatherTimeAttributeIndices(RelNode node) {
return gatherTimeAttributeIndices(node, f -> isTimeIndicatorType(f.getType()));
}
private Set<Integer> gatherTimeAttributeIndices(
RelNode node, Predicate<RelDataTypeField> predicate) {
return node.getRowType().getFieldList().stream()
.filter(predicate)
.map(RelDataTypeField::getIndex)
.collect(Collectors.toSet());
}
private RelDataType timestamp(boolean isNullable, boolean isTimestampLtzIndicator) {
LogicalType logicalType;
if (isTimestampLtzIndicator) {
logicalType = new LocalZonedTimestampType(isNullable, 3);
} else {
logicalType = new TimestampType(isNullable, 3);
}
return ((FlinkTypeFactory) rexBuilder.getTypeFactory())
.createFieldTypeFromLogicalType(logicalType);
}
private boolean isTimestampLtzType(RelDataType type) {
return type.getSqlTypeName().equals(SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE);
}
// ----------------------------------------------------------------------------------------
// Materializer for RexNode including time indicator
// ----------------------------------------------------------------------------------------
private
|
RelTimeIndicatorConverter
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ComparableTypeTest.java
|
{
"start": 5803,
"end": 5922
}
|
class ____ extends AClass {
@Override
public int compareTo(AClass o) {
return 0;
}
}
abstract
|
BClass
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/target/TargetTest.java
|
{
"start": 593,
"end": 2557
}
|
class ____ {
@AfterEach
public void afterEach(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncateMappedObjects();
}
@Test
public void testTargetOnEmbedded(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
LuggageImpl l = new LuggageImpl();
l.setHeight( 12 );
l.setWidth( 12 );
Owner o = new OwnerImpl();
o.setName( "Emmanuel" );
l.setOwner( o );
session.persist( l );
session.flush();
session.clear();
l = session.find( LuggageImpl.class, l.getId() );
assertEquals( "Emmanuel", l.getOwner().getName() );
}
);
}
@Test
public void testTargetOnMapKey(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Luggage l = new LuggageImpl();
l.setHeight( 12 );
l.setWidth( 12 );
Size size = new SizeImpl();
size.setName( "S" );
Owner o = new OwnerImpl();
o.setName( "Emmanuel" );
l.setOwner( o );
session.persist( l );
Brand b = new Brand();
session.persist( b );
b.getLuggagesBySize().put( size, l );
session.flush();
session.clear();
b = session.find( Brand.class, b.getId() );
assertEquals( "S", b.getLuggagesBySize().keySet().iterator().next().getName() );
}
);
}
@Test
public void testTargetOnMapKeyManyToMany(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Luggage l = new LuggageImpl();
l.setHeight( 12 );
l.setWidth( 12 );
Size size = new SizeImpl();
size.setName( "S" );
Owner o = new OwnerImpl();
o.setName( "Emmanuel" );
l.setOwner( o );
session.persist( l );
Brand b = new Brand();
session.persist( b );
b.getSizePerLuggage().put( l, size );
session.flush();
session.clear();
b = session.find( Brand.class, b.getId() );
assertEquals( 12d, b.getSizePerLuggage().keySet().iterator().next().getWidth(), 0.01 );
}
);
}
}
|
TargetTest
|
java
|
spring-projects__spring-framework
|
spring-messaging/src/test/java/org/springframework/messaging/support/MessageBuilderTests.java
|
{
"start": 1236,
"end": 9619
}
|
class ____ {
@Test
void simpleMessageCreation() {
Message<String> message = MessageBuilder.withPayload("foo").build();
assertThat(message.getPayload()).isEqualTo("foo");
}
@Test
void headerValues() {
Message<String> message = MessageBuilder.withPayload("test")
.setHeader("foo", "bar")
.setHeader("count", 123)
.build();
assertThat(message.getHeaders().get("foo", String.class)).isEqualTo("bar");
assertThat(message.getHeaders().get("count", Integer.class)).isEqualTo(123);
}
@Test
void copiedHeaderValues() {
Message<String> message1 = MessageBuilder.withPayload("test1")
.setHeader("foo", "1")
.setHeader("bar", "2")
.build();
Message<String> message2 = MessageBuilder.withPayload("test2")
.copyHeaders(message1.getHeaders())
.setHeader("foo", "42")
.setHeaderIfAbsent("bar", "99")
.build();
assertThat(message1.getPayload()).isEqualTo("test1");
assertThat(message2.getPayload()).isEqualTo("test2");
assertThat(message1.getHeaders().get("foo")).isEqualTo("1");
assertThat(message2.getHeaders().get("foo")).isEqualTo("42");
assertThat(message1.getHeaders().get("bar")).isEqualTo("2");
assertThat(message2.getHeaders().get("bar")).isEqualTo("2");
}
@Test
void idHeaderValueReadOnly() {
UUID id = UUID.randomUUID();
assertThatIllegalArgumentException().isThrownBy(() ->
MessageBuilder.withPayload("test").setHeader(MessageHeaders.ID, id));
}
@Test
void timestampValueReadOnly() {
Long timestamp = 12345L;
assertThatIllegalArgumentException().isThrownBy(() ->
MessageBuilder.withPayload("test").setHeader(MessageHeaders.TIMESTAMP, timestamp).build());
}
@Test
void copyHeadersIfAbsent() {
Message<String> message1 = MessageBuilder.withPayload("test1")
.setHeader("foo", "bar").build();
Message<String> message2 = MessageBuilder.withPayload("test2")
.setHeader("foo", 123)
.copyHeadersIfAbsent(message1.getHeaders())
.build();
assertThat(message2.getPayload()).isEqualTo("test2");
assertThat(message2.getHeaders().get("foo")).isEqualTo(123);
}
@Test
void createFromMessage() {
Message<String> message1 = MessageBuilder.withPayload("test")
.setHeader("foo", "bar").build();
Message<String> message2 = MessageBuilder.fromMessage(message1).build();
assertThat(message2.getPayload()).isEqualTo("test");
assertThat(message2.getHeaders().get("foo")).isEqualTo("bar");
}
@Test // gh-23417
void createErrorMessageFromErrorMessage() {
Message<String> source = MessageBuilder.withPayload("test").setHeader("foo", "bar").build();
RuntimeException ex = new RuntimeException();
ErrorMessage errorMessage1 = new ErrorMessage(ex, Collections.singletonMap("baz", "42"), source);
Message<Throwable> errorMessage2 = MessageBuilder.fromMessage(errorMessage1).build();
assertThat(errorMessage2).isExactlyInstanceOf(ErrorMessage.class);
ErrorMessage actual = (ErrorMessage) errorMessage2;
assertThat(actual.getPayload()).isSameAs(ex);
assertThat(actual.getHeaders().get("baz")).isEqualTo("42");
assertThat(actual.getOriginalMessage()).isSameAs(source);
}
@Test
void createIdRegenerated() {
Message<String> message1 = MessageBuilder.withPayload("test")
.setHeader("foo", "bar").build();
Message<String> message2 = MessageBuilder.fromMessage(message1).setHeader("another", 1).build();
assertThat(message2.getHeaders().get("foo")).isEqualTo("bar");
assertThat(message2.getHeaders().getId()).isNotSameAs(message1.getHeaders().getId());
}
@Test
void remove() {
Message<Integer> message1 = MessageBuilder.withPayload(1)
.setHeader("foo", "bar").build();
Message<Integer> message2 = MessageBuilder.fromMessage(message1)
.removeHeader("foo")
.build();
assertThat(message2.getHeaders().containsKey("foo")).isFalse();
}
@Test
void settingToNullRemoves() {
Message<Integer> message1 = MessageBuilder.withPayload(1)
.setHeader("foo", "bar").build();
Message<Integer> message2 = MessageBuilder.fromMessage(message1)
.setHeader("foo", null)
.build();
assertThat(message2.getHeaders().containsKey("foo")).isFalse();
}
@Test
void notModifiedSameMessage() {
Message<?> original = MessageBuilder.withPayload("foo").build();
Message<?> result = MessageBuilder.fromMessage(original).build();
assertThat(result).isEqualTo(original);
}
@Test
void containsHeaderNotModifiedSameMessage() {
Message<?> original = MessageBuilder.withPayload("foo").setHeader("bar", 42).build();
Message<?> result = MessageBuilder.fromMessage(original).build();
assertThat(result).isEqualTo(original);
}
@Test
void sameHeaderValueAddedNotModifiedSameMessage() {
Message<?> original = MessageBuilder.withPayload("foo").setHeader("bar", 42).build();
Message<?> result = MessageBuilder.fromMessage(original).setHeader("bar", 42).build();
assertThat(result).isEqualTo(original);
}
@Test
void copySameHeaderValuesNotModifiedSameMessage() {
Date current = new Date();
Map<String, Object> originalHeaders = new HashMap<>();
originalHeaders.put("b", "xyz");
originalHeaders.put("c", current);
Message<?> original = MessageBuilder.withPayload("foo").setHeader("a", 123).copyHeaders(originalHeaders).build();
Map<String, Object> newHeaders = new HashMap<>();
newHeaders.put("a", 123);
newHeaders.put("b", "xyz");
newHeaders.put("c", current);
Message<?> result = MessageBuilder.fromMessage(original).copyHeaders(newHeaders).build();
assertThat(result).isEqualTo(original);
}
@Test
void buildMessageWithMutableHeaders() {
MessageHeaderAccessor accessor = new MessageHeaderAccessor();
accessor.setLeaveMutable(true);
MessageHeaders headers = accessor.getMessageHeaders();
Message<?> message = MessageBuilder.createMessage("payload", headers);
accessor.setHeader("foo", "bar");
assertThat(headers.get("foo")).isEqualTo("bar");
assertThat(MessageHeaderAccessor.getAccessor(message, MessageHeaderAccessor.class)).isSameAs(accessor);
}
@Test
void buildMessageWithDefaultMutability() {
MessageHeaderAccessor accessor = new MessageHeaderAccessor();
MessageHeaders headers = accessor.getMessageHeaders();
Message<?> message = MessageBuilder.createMessage("foo", headers);
assertThatIllegalStateException().isThrownBy(() ->
accessor.setHeader("foo", "bar"))
.withMessageContaining("Already immutable");
assertThat(MessageHeaderAccessor.getAccessor(message, MessageHeaderAccessor.class)).isSameAs(accessor);
}
@Test
void buildMessageWithoutIdAndTimestamp() {
MessageHeaderAccessor headerAccessor = new MessageHeaderAccessor();
headerAccessor.setIdGenerator(() -> MessageHeaders.ID_VALUE_NONE);
Message<?> message = MessageBuilder.createMessage("foo", headerAccessor.getMessageHeaders());
assertThat(message.getHeaders().getId()).isNull();
assertThat(message.getHeaders().getTimestamp()).isNull();
}
@Test
void buildMultipleMessages() {
MessageHeaderAccessor headerAccessor = new MessageHeaderAccessor();
MessageBuilder<?> messageBuilder = MessageBuilder.withPayload("payload").setHeaders(headerAccessor);
headerAccessor.setHeader("foo", "bar1");
Message<?> message1 = messageBuilder.build();
headerAccessor.setHeader("foo", "bar2");
Message<?> message2 = messageBuilder.build();
headerAccessor.setHeader("foo", "bar3");
Message<?> message3 = messageBuilder.build();
assertThat(message1.getHeaders().get("foo")).isEqualTo("bar1");
assertThat(message2.getHeaders().get("foo")).isEqualTo("bar2");
assertThat(message3.getHeaders().get("foo")).isEqualTo("bar3");
}
@Test // gh-34949
void buildMessageWithReplyChannelHeader() {
MessageHeaderAccessor headerAccessor = new MessageHeaderAccessor();
MessageBuilder<?> messageBuilder = MessageBuilder.withPayload("payload").setHeaders(headerAccessor);
headerAccessor.setHeader(MessageHeaders.REPLY_CHANNEL, "foo");
Message<?> message1 = messageBuilder.build();
assertThat(message1.getHeaders().get(MessageHeaders.REPLY_CHANNEL)).isEqualTo("foo");
headerAccessor.setHeader("hannel", 0);
Message<?> message2 = messageBuilder.build();
assertThat(message2.getHeaders().get("hannel")).isEqualTo(0);
assertThatIllegalArgumentException()
.isThrownBy(() -> headerAccessor.setHeader(MessageHeaders.REPLY_CHANNEL, 0))
.withMessage("'%s' header value must be a MessageChannel or String", MessageHeaders.REPLY_CHANNEL);
}
}
|
MessageBuilderTests
|
java
|
spring-projects__spring-boot
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/fieldvalues/javac/JavaCompilerFieldValuesParser.java
|
{
"start": 6989,
"end": 8384
}
|
enum
____ (selectedMember != null && selectedMember.expression().equals(variableType)) {
return ConventionUtils.toDashedCase(selectedMember.identifier().toLowerCase(Locale.ENGLISH));
}
return null;
}
return null;
}
/**
 * Resolves a factory-method expression (Duration/DataSize/Period "of" calls)
 * to a suffixed default value. Each supported factory type is probed in turn;
 * the first match wins, otherwise the raw factory value is returned as-is.
 */
private Object getFactoryValue(ExpressionTree expression, Object factoryValue) {
    Object value = getFactoryValue(expression, factoryValue, DURATION_OF, DURATION_SUFFIX);
    if (value == null) {
        value = getFactoryValue(expression, factoryValue, DATA_SIZE_OF, DATA_SIZE_SUFFIX);
    }
    if (value == null) {
        value = getFactoryValue(expression, factoryValue, PERIOD_OF, PERIOD_SUFFIX);
    }
    return (value != null) ? value : factoryValue;
}
/**
 * Returns {@code factoryValue + suffix} when the expression invokes a factory
 * method whose fully qualified form starts with {@code prefix} and whose
 * method name maps to a suffix, otherwise {@code null}.
 */
private Object getFactoryValue(ExpressionTree expression, Object factoryValue, String prefix,
        Map<String, String> suffixMapping) {
    Object instance = expression.getInstance();
    if (instance == null) {
        return null;
    }
    String call = instance.toString();
    if (!call.startsWith(prefix)) {
        return null;
    }
    // Method name sits between the prefix and the opening parenthesis,
    // e.g. "Duration.ofSeconds(" -> "Seconds" when prefix is "Duration.of".
    String methodName = call.substring(prefix.length(), call.indexOf('('));
    String suffix = suffixMapping.get(methodName);
    return (suffix != null) ? factoryValue + suffix : null;
}
// Returns the parsed field default values, keyed by field name.
// NOTE(review): exposes the internal mutable map directly — callers could
// mutate parser state; confirm callers treat it as read-only.
Map<String, Object> getFieldValues() {
return this.fieldValues;
}
}
}
|
if
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ActiveUsersManager.java
|
{
"start": 1477,
"end": 3702
}
|
class ____ implements AbstractUsersManager {
private static final Logger LOG =
LoggerFactory.getLogger(ActiveUsersManager.class);
private final QueueMetrics metrics;
private int activeUsers = 0;
private Map<String, Set<ApplicationId>> usersApplications =
new HashMap<String, Set<ApplicationId>>();
public ActiveUsersManager(QueueMetrics metrics) {
this.metrics = metrics;
}
/**
* An application has new outstanding requests.
*
* @param user application user
* @param applicationId activated application
*/
@Lock({Queue.class, SchedulerApplicationAttempt.class})
@Override
synchronized public void activateApplication(
String user, ApplicationId applicationId) {
Set<ApplicationId> userApps = usersApplications.get(user);
if (userApps == null) {
userApps = new HashSet<ApplicationId>();
usersApplications.put(user, userApps);
++activeUsers;
metrics.incrActiveUsers();
LOG.debug("User {} added to activeUsers, currently: {}", user,
activeUsers);
}
if (userApps.add(applicationId)) {
metrics.activateApp(user);
}
}
/**
* An application has no more outstanding requests.
*
* @param user application user
* @param applicationId deactivated application
*/
@Lock({Queue.class, SchedulerApplicationAttempt.class})
@Override
synchronized public void deactivateApplication(
String user, ApplicationId applicationId) {
Set<ApplicationId> userApps = usersApplications.get(user);
if (userApps != null) {
if (userApps.remove(applicationId)) {
metrics.deactivateApp(user);
}
if (userApps.isEmpty()) {
usersApplications.remove(user);
--activeUsers;
metrics.decrActiveUsers();
LOG.debug("User {} removed from activeUsers, currently: {}", user,
activeUsers);
}
}
}
/**
* Get number of active users i.e. users with applications which have pending
* resource requests.
* @return number of active users
*/
@Lock({Queue.class, SchedulerApplicationAttempt.class})
@Override
synchronized public int getNumActiveUsers() {
return activeUsers;
}
}
|
ActiveUsersManager
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/filter/wall/mysql/MySqlWallTest54.java
|
{
"start": 893,
"end": 1617
}
|
/**
 * Wall-filter regression test: a UNION that selects from
 * {@code information_schema.Engines} is permitted only when schema checking
 * is disabled.
 *
 * <p>Fix: restores the class name (the placeholder {@code ____} did not
 * compile).
 */
public class MySqlWallTest54 extends TestCase {

    public void test_true() throws Exception {
        WallProvider provider = new MySqlWallProvider();
        // Disabling schema checks lets the information_schema access through.
        provider.getConfig().setSchemaCheck(false);
        assertTrue(provider.checkValid(//
                "SELECT *FROM T UNION select `ENGINE`, `SUPPORT` from information_schema.Engines"));
        // Both tables referenced by the statement are recorded in the stats.
        assertEquals(2, provider.getTableStats().size());
    }

    public void test_false() throws Exception {
        // Default configuration: schema check enabled, statement is rejected.
        WallProvider provider = new MySqlWallProvider();
        assertFalse(provider.checkValid(//
                "SELECT *FROM T UNION select `ENGINE`, `SUPPORT` from information_schema.Engines"));
        assertEquals(2, provider.getTableStats().size());
    }
}
|
MySqlWallTest54
|
java
|
google__auto
|
service/processor/src/main/java/com/google/auto/service/processor/AutoServiceProcessor.java
|
{
"start": 8031,
"end": 8327
}
|
class ____ interface). For ServiceLoader, we could also check that it has
// a public no-arg constructor. But it turns out that people also use AutoService in contexts
// where the META-INF/services entries are read by things other than ServiceLoader. Those things
// still require the
|
or
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java
|
{
"start": 6934,
"end": 8532
}
|
class ____ extends AdaptingAggregator implements SingleBucketAggregator {
private final String name;
private final FiltersAggregator innerAggregator;
FilterAggregator(String name, Aggregator parent, AggregatorFactories subAggregators, FiltersAggregator innerAggregator)
throws IOException {
super(parent, subAggregators, aggregatorFactories -> innerAggregator);
this.name = name;
this.innerAggregator = innerAggregator;
}
@Override
protected InternalAggregation adapt(InternalAggregation delegateResult) throws IOException {
InternalFilters innerResult = (InternalFilters) delegateResult;
var innerBucket = innerResult.getBuckets().get(0);
return new InternalFilter(name, innerBucket.getDocCount(), innerBucket.getAggregations(), innerResult.getMetadata());
}
@Override
public Aggregator resolveSortPath(AggregationPath.PathElement next, Iterator<AggregationPath.PathElement> path) {
return resolveSortPathOnValidAgg(next, path);
}
@Override
public BucketComparator bucketComparator(String key, SortOrder order) {
if (key == null || "doc_count".equals(key)) {
return (lhs, rhs) -> order.reverseMul() * Long.compare(
innerAggregator.bucketDocCount(lhs),
innerAggregator.bucketDocCount(rhs)
);
} else {
return super.bucketComparator(key, order);
}
}
}
}
|
FilterAggregator
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
|
{
"start": 7256,
"end": 57434
}
|
class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestFifoScheduler.class);
private final int GB = 1024;
private ResourceManager resourceManager = null;
private static Configuration conf;
private static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
private final static ContainerUpdates NULL_UPDATE_REQUESTS =
new ContainerUpdates();
// Creates a fresh MockRM configured to use the FifoScheduler.
// NOTE(review): assigns the static 'conf' field per test — confirm tests
// never run concurrently within this class.
@BeforeEach
public void setUp() throws Exception {
conf = new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,
FifoScheduler.class, ResourceScheduler.class);
resourceManager = new MockRM(conf);
}
// Stops the RM created in setUp().
@AfterEach
public void tearDown() throws Exception {
resourceManager.stop();
}
// Registers a NodeManager with the RM and delivers the matching NODE_ADDED
// event directly to the scheduler so the node is immediately schedulable.
private NodeManager registerNode(String hostName, int containerManagerPort,
int nmHttpPort, String rackName,
Resource capability, NodeStatus nodeStatus)
throws IOException, YarnException {
NodeManager nm = new NodeManager(hostName, containerManagerPort,
nmHttpPort, rackName, capability, resourceManager, nodeStatus);
NodeAddedSchedulerEvent nodeAddEvent1 =
new NodeAddedSchedulerEvent(resourceManager.getRMContext().getRMNodes()
.get(nm.getNodeId()));
resourceManager.getResourceScheduler().handle(nodeAddEvent1);
return nm;
}
// Builds a deterministic attempt id; cluster timestamp 0 keeps ids stable.
private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) {
    ApplicationId application = ApplicationId.newInstance(0, appId);
    return ApplicationAttemptId.newInstance(application, attemptId);
}
// Builds a memory-only ResourceRequest for the given location and priority.
private ResourceRequest createResourceRequest(int memory, String host,
    int priority, int numContainers) {
    // Construct the priority record first, then populate the request.
    Priority prio = recordFactory.newRecordInstance(Priority.class);
    prio.setPriority(priority);
    ResourceRequest request =
        recordFactory.newRecordInstance(ResourceRequest.class);
    request.setPriority(prio);
    request.setResourceName(host);
    request.setCapability(Resources.createResource(memory));
    request.setNumContainers(numContainers);
    return request;
}
@Test
@Timeout(value = 5)
public void testFifoSchedulerCapacityWhenNoNMs() {
    // A scheduler with no registered nodes must report zero current capacity.
    FifoScheduler scheduler = new FifoScheduler();
    QueueInfo info = scheduler.getQueueInfo(null, false, false);
    assertEquals(0.0f, info.getCurrentCapacity(), 0.0f);
}
// One application with two attempts must count as a single submitted app
// in the root queue metrics.
// NOTE(review): the 'writer' mock is created but never wired in — presumably
// leftover setup; confirm and remove upstream.
@Test
@Timeout(value = 5)
public void testAppAttemptMetrics() throws Exception {
AsyncDispatcher dispatcher = new InlineDispatcher();
FifoScheduler scheduler = new FifoScheduler();
RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
RMContext rmContext = new RMContextImpl(dispatcher, null,
null, null, null, null, null, null, null, scheduler);
((RMContextImpl) rmContext).setSystemMetricsPublisher(
mock(SystemMetricsPublisher.class));
Configuration conf = new Configuration();
((RMContextImpl) rmContext).setScheduler(scheduler);
((RMContextImpl) rmContext).setYarnConfiguration(conf);
scheduler.setRMContext(rmContext);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf, rmContext);
QueueMetrics metrics = scheduler.getRootQueueMetrics();
int beforeAppsSubmitted = metrics.getAppsSubmitted();
ApplicationId appId = BuilderUtils.newApplicationId(200, 1);
ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
appId, 1);
SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId, "queue", "user");
scheduler.handle(appEvent);
SchedulerEvent attemptEvent =
new AppAttemptAddedSchedulerEvent(appAttemptId, false);
scheduler.handle(attemptEvent);
// Second attempt of the same application: must NOT bump appsSubmitted.
appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 2);
SchedulerEvent attemptEvent2 =
new AppAttemptAddedSchedulerEvent(appAttemptId, false);
scheduler.handle(attemptEvent2);
int afterAppsSubmitted = metrics.getAppsSubmitted();
assertEquals(1, afterAppsSubmitted - beforeAppsSubmitted);
scheduler.stop();
}
// Node-local requests must be fully satisfied on the matching node: after a
// single node update, all 3 node-local requests are consumed and 3 live
// containers exist for the attempt.
// NOTE(review): the 'writer' mock is unused — confirm and remove upstream.
@Test
@Timeout(value = 2)
public void testNodeLocalAssignment() throws Exception {
AsyncDispatcher dispatcher = new InlineDispatcher();
Configuration conf = new Configuration();
RMContainerTokenSecretManager containerTokenSecretManager =
new RMContainerTokenSecretManager(conf);
containerTokenSecretManager.rollMasterKey();
NMTokenSecretManagerInRM nmTokenSecretManager =
new NMTokenSecretManagerInRM(conf);
nmTokenSecretManager.rollMasterKey();
RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
FifoScheduler scheduler = new FifoScheduler();
RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null,
null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler);
AllocationTagsManager ptm = mock(AllocationTagsManager.class);
rmContext.setAllocationTagsManager(ptm);
rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class));
rmContext.setRMApplicationHistoryWriter(
mock(RMApplicationHistoryWriter.class));
((RMContextImpl) rmContext).setYarnConfiguration(new YarnConfiguration());
scheduler.setRMContext(rmContext);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(new Configuration(), rmContext);
// 64 GB node so capacity never limits the 3 x 64 MB asks below.
RMNode node0 = MockNodes.newNodeInfo(1,
Resources.createResource(1024 * 64), 1, "127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node0);
scheduler.handle(nodeEvent1);
int _appId = 1;
int _appAttemptId = 1;
ApplicationAttemptId appAttemptId = createAppAttemptId(_appId,
_appAttemptId);
createMockRMApp(appAttemptId, rmContext);
AppAddedSchedulerEvent appEvent =
new AppAddedSchedulerEvent(appAttemptId.getApplicationId(), "queue1",
"user1");
scheduler.handle(appEvent);
AppAttemptAddedSchedulerEvent attemptEvent =
new AppAttemptAddedSchedulerEvent(appAttemptId, false);
scheduler.handle(attemptEvent);
int memory = 64;
int nConts = 3;
int priority = 20;
// Ask at all three locality levels: node, rack, and ANY.
List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
ResourceRequest nodeLocal = createResourceRequest(memory,
node0.getHostName(), priority, nConts);
ResourceRequest rackLocal = createResourceRequest(memory,
node0.getRackName(), priority, nConts);
ResourceRequest any = createResourceRequest(memory, ResourceRequest.ANY, priority,
nConts);
ask.add(nodeLocal);
ask.add(rackLocal);
ask.add(any);
scheduler.allocate(appAttemptId, ask, null, new ArrayList<ContainerId>(),
null, null, NULL_UPDATE_REQUESTS);
NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0);
// Before the node update event, there are 3 local requests outstanding
assertEquals(3, nodeLocal.getNumContainers());
scheduler.handle(node0Update);
// After the node update event, check that there are no more local requests
// outstanding
assertEquals(0, nodeLocal.getNumContainers());
//Also check that the containers were scheduled
SchedulerAppReport info = scheduler.getSchedulerAppInfo(appAttemptId);
assertEquals(3, info.getLiveContainers().size());
scheduler.stop();
}
// Shrinking a node's resource via NodeResourceUpdateSchedulerEvent must be
// reflected in both total and unallocated resources, and subsequent
// scheduling must fill the (smaller) node to 100% queue capacity.
// NOTE(review): 'writer' mock is unused, and the second assertEquals below
// passes 1024 as a delta to the long/long overload — looks unintended;
// confirm upstream.
@Test
@Timeout(value = 2)
public void testUpdateResourceOnNode() throws Exception {
AsyncDispatcher dispatcher = new InlineDispatcher();
Configuration conf = new Configuration();
RMContainerTokenSecretManager containerTokenSecretManager =
new RMContainerTokenSecretManager(conf);
containerTokenSecretManager.rollMasterKey();
NMTokenSecretManagerInRM nmTokenSecretManager =
new NMTokenSecretManagerInRM(conf);
nmTokenSecretManager.rollMasterKey();
RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
FifoScheduler scheduler = new FifoScheduler();
RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null,
null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler);
AllocationTagsManager ptm = mock(AllocationTagsManager.class);
rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class));
rmContext.setRMApplicationHistoryWriter(mock(RMApplicationHistoryWriter.class));
((RMContextImpl) rmContext).setYarnConfiguration(new YarnConfiguration());
NullRMNodeLabelsManager nlm = new NullRMNodeLabelsManager();
nlm.init(new Configuration());
rmContext.setNodeLabelManager(nlm);
rmContext.setAllocationTagsManager(ptm);
scheduler.setRMContext(rmContext);
((RMContextImpl) rmContext).setScheduler(scheduler);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(new Configuration(), rmContext);
RMNode node0 = MockNodes.newNodeInfo(1,
Resources.createResource(2048, 4), 1, "127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node0);
scheduler.handle(nodeEvent1);
assertThat(scheduler.getNumClusterNodes()).isEqualTo(1);
// Downgrade the node from 2048 MB to 1024 MB.
Resource newResource = Resources.createResource(1024, 4);
NodeResourceUpdateSchedulerEvent node0ResourceUpdate = new
NodeResourceUpdateSchedulerEvent(node0, ResourceOption.newInstance(
newResource, ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT));
scheduler.handle(node0ResourceUpdate);
// SchedulerNode's total resource and available resource are changed.
assertEquals(1024, scheduler.getNodeTracker().getNode(node0.getNodeID())
.getTotalResource().getMemorySize());
assertEquals(1024, scheduler.getNodeTracker().getNode(node0.getNodeID()).
getUnallocatedResource().getMemorySize(), 1024);
QueueInfo queueInfo = scheduler.getQueueInfo(null, false, false);
assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f);
int _appId = 1;
int _appAttemptId = 1;
ApplicationAttemptId appAttemptId = createAppAttemptId(_appId,
_appAttemptId);
createMockRMApp(appAttemptId, rmContext);
AppAddedSchedulerEvent appEvent =
new AppAddedSchedulerEvent(appAttemptId.getApplicationId(), "queue1",
"user1");
scheduler.handle(appEvent);
AppAttemptAddedSchedulerEvent attemptEvent =
new AppAttemptAddedSchedulerEvent(appAttemptId, false);
scheduler.handle(attemptEvent);
int memory = 1024;
int priority = 1;
List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
ResourceRequest nodeLocal = createResourceRequest(memory,
node0.getHostName(), priority, 1);
ResourceRequest rackLocal = createResourceRequest(memory,
node0.getRackName(), priority, 1);
ResourceRequest any = createResourceRequest(memory, ResourceRequest.ANY, priority,
1);
ask.add(nodeLocal);
ask.add(rackLocal);
ask.add(any);
scheduler.allocate(appAttemptId, ask, null, new ArrayList<ContainerId>(),
null, null, NULL_UPDATE_REQUESTS);
// Before the node update event, there are one local request
assertEquals(1, nodeLocal.getNumContainers());
NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0);
// Now schedule.
scheduler.handle(node0Update);
// After the node update event, check no local request
assertEquals(0, nodeLocal.getNumContainers());
// Also check that one container was scheduled
SchedulerAppReport info = scheduler.getSchedulerAppInfo(appAttemptId);
assertEquals(1, info.getLiveContainers().size());
// And check the default Queue now is full.
queueInfo = scheduler.getQueueInfo(null, false, false);
assertEquals(1.0f, queueInfo.getCurrentCapacity(), 0.0f);
}
// End-to-end FIFO scheduling walkthrough across two apps and two nodes,
// driven by explicit NM heartbeats and app schedule() calls.
// NOTE(review): the @Test annotation is commented out, so this test is
// disabled — confirm whether that is still intentional.
// @Test
public void testFifoScheduler() throws Exception {
LOG.info("--- START: testFifoScheduler ---");
final int GB = 1024;
NodeStatus mockNodeStatus = createMockNodeStatus();
// Register node1
String host_0 = "host_0";
org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 =
registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK,
Resources.createResource(4 * GB, 1), mockNodeStatus);
nm_0.heartbeat();
// Register node2
String host_1 = "host_1";
org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 =
registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK,
Resources.createResource(2 * GB, 1), mockNodeStatus);
nm_1.heartbeat();
// ResourceRequest priorities
Priority priority_0 = Priority.newInstance(0);
Priority priority_1 = Priority.newInstance(1);
// Submit an application
Application application_0 = new Application("user_0", resourceManager);
application_0.submit();
application_0.addNodeManager(host_0, 1234, nm_0);
application_0.addNodeManager(host_1, 1234, nm_1);
Resource capability_0_0 = Resources.createResource(GB);
application_0.addResourceRequestSpec(priority_1, capability_0_0);
Resource capability_0_1 = Resources.createResource(2 * GB);
application_0.addResourceRequestSpec(priority_0, capability_0_1);
Task task_0_0 = new Task(application_0, priority_1,
new String[] {host_0, host_1});
application_0.addTask(task_0_0);
// Submit another application
Application application_1 = new Application("user_1", resourceManager);
application_1.submit();
application_1.addNodeManager(host_0, 1234, nm_0);
application_1.addNodeManager(host_1, 1234, nm_1);
Resource capability_1_0 = Resources.createResource(3 * GB);
application_1.addResourceRequestSpec(priority_1, capability_1_0);
Resource capability_1_1 = Resources.createResource(4 * GB);
application_1.addResourceRequestSpec(priority_0, capability_1_1);
Task task_1_0 = new Task(application_1, priority_1,
new String[] {host_0, host_1});
application_1.addTask(task_1_0);
// Send resource requests to the scheduler
LOG.info("Send resource requests to the scheduler");
application_0.schedule();
application_1.schedule();
// Send a heartbeat to kick the tires on the Scheduler
LOG.info("Send a heartbeat to kick the tires on the Scheduler... " +
"nm0 -> task_0_0 and task_1_0 allocated, used=4G " +
"nm1 -> nothing allocated");
nm_0.heartbeat(); // task_0_0 and task_1_0 allocated, used=4G
nm_1.heartbeat(); // nothing allocated
// Get allocations from the scheduler
application_0.schedule(); // task_0_0
checkApplicationResourceUsage(GB, application_0);
application_1.schedule(); // task_1_0
checkApplicationResourceUsage(3 * GB, application_1);
nm_0.heartbeat();
nm_1.heartbeat();
checkNodeResourceUsage(4*GB, nm_0); // task_0_0 (1G) and task_1_0 (3G)
checkNodeResourceUsage(0*GB, nm_1); // no tasks, 2G available
LOG.info("Adding new tasks...");
Task task_1_1 = new Task(application_1, priority_1,
new String[] {ResourceRequest.ANY});
application_1.addTask(task_1_1);
Task task_1_2 = new Task(application_1, priority_1,
new String[] {ResourceRequest.ANY});
application_1.addTask(task_1_2);
Task task_1_3 = new Task(application_1, priority_0,
new String[] {ResourceRequest.ANY});
application_1.addTask(task_1_3);
application_1.schedule();
Task task_0_1 = new Task(application_0, priority_1,
new String[] {host_0, host_1});
application_0.addTask(task_0_1);
Task task_0_2 = new Task(application_0, priority_1,
new String[] {host_0, host_1});
application_0.addTask(task_0_2);
Task task_0_3 = new Task(application_0, priority_0,
new String[] {ResourceRequest.ANY});
application_0.addTask(task_0_3);
application_0.schedule();
// Send a heartbeat to kick the tires on the Scheduler
LOG.info("Sending hb from " + nm_0.getHostName());
nm_0.heartbeat(); // nothing new, used=4G
LOG.info("Sending hb from " + nm_1.getHostName());
nm_1.heartbeat(); // task_0_3, used=2G
// Get allocations from the scheduler
LOG.info("Trying to allocate...");
application_0.schedule();
checkApplicationResourceUsage(3 * GB, application_0);
application_1.schedule();
checkApplicationResourceUsage(3 * GB, application_1);
nm_0.heartbeat();
nm_1.heartbeat();
checkNodeResourceUsage(4*GB, nm_0);
checkNodeResourceUsage(2*GB, nm_1);
// Complete tasks
LOG.info("Finishing up task_0_0");
application_0.finishTask(task_0_0); // Now task_0_1
application_0.schedule();
application_1.schedule();
nm_0.heartbeat();
nm_1.heartbeat();
checkApplicationResourceUsage(3 * GB, application_0);
checkApplicationResourceUsage(3 * GB, application_1);
checkNodeResourceUsage(4*GB, nm_0);
checkNodeResourceUsage(2*GB, nm_1);
LOG.info("Finishing up task_1_0");
application_1.finishTask(task_1_0); // Now task_0_2
application_0.schedule(); // final overcommit for app0 caused here
application_1.schedule();
nm_0.heartbeat(); // final overcommit for app0 occurs here
nm_1.heartbeat();
checkApplicationResourceUsage(4 * GB, application_0);
checkApplicationResourceUsage(0 * GB, application_1);
//checkNodeResourceUsage(1*GB, nm_0); // final over-commit -> rm.node->1G, test.node=2G
checkNodeResourceUsage(2*GB, nm_1);
LOG.info("Finishing up task_0_3");
application_0.finishTask(task_0_3); // No more
application_0.schedule();
application_1.schedule();
nm_0.heartbeat();
nm_1.heartbeat();
checkApplicationResourceUsage(2 * GB, application_0);
checkApplicationResourceUsage(0 * GB, application_1);
//checkNodeResourceUsage(2*GB, nm_0); // final over-commit, rm.node->1G, test.node->2G
checkNodeResourceUsage(0*GB, nm_1);
LOG.info("Finishing up task_0_1");
application_0.finishTask(task_0_1);
application_0.schedule();
application_1.schedule();
nm_0.heartbeat();
nm_1.heartbeat();
checkApplicationResourceUsage(1 * GB, application_0);
checkApplicationResourceUsage(0 * GB, application_1);
LOG.info("Finishing up task_0_2");
application_0.finishTask(task_0_2); // now task_1_3 can go!
application_0.schedule();
application_1.schedule();
nm_0.heartbeat();
nm_1.heartbeat();
checkApplicationResourceUsage(0 * GB, application_0);
checkApplicationResourceUsage(4 * GB, application_1);
LOG.info("Finishing up task_1_3");
application_1.finishTask(task_1_3); // now task_1_1
application_0.schedule();
application_1.schedule();
nm_0.heartbeat();
nm_1.heartbeat();
checkApplicationResourceUsage(0 * GB, application_0);
checkApplicationResourceUsage(3 * GB, application_1);
LOG.info("Finishing up task_1_1");
application_1.finishTask(task_1_1);
application_0.schedule();
application_1.schedule();
nm_0.heartbeat();
nm_1.heartbeat();
checkApplicationResourceUsage(0 * GB, application_0);
checkApplicationResourceUsage(3 * GB, application_1);
LOG.info("--- END: testFifoScheduler ---");
}
// Both submitted apps must appear in the "default" queue listing; a query
// for an unknown queue returns null (not an empty list).
@Test
public void testGetAppsInQueue() throws Exception {
Application application_0 = new Application("user_0", resourceManager);
application_0.submit();
Application application_1 = new Application("user_0", resourceManager);
application_1.submit();
ResourceScheduler scheduler = resourceManager.getResourceScheduler();
List<ApplicationAttemptId> appsInDefault = scheduler.getAppsInQueue("default");
assertTrue(appsInDefault.contains(application_0.getApplicationAttemptId()));
assertTrue(appsInDefault.contains(application_1.getApplicationAttemptId()));
assertEquals(2, appsInDefault.size());
assertNull(scheduler.getAppsInQueue("someotherqueue"));
}
// Delegates the add/remove lifecycle check to the shared scheduler test
// helper, using a locally configured FifoScheduler-backed MockRM.
@Test
public void testAddAndRemoveAppFromFiFoScheduler() throws Exception {
Configuration conf = new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
ResourceScheduler.class);
MockRM rm = new MockRM(conf);
@SuppressWarnings("unchecked")
AbstractYarnScheduler<SchedulerApplicationAttempt, SchedulerNode> fs =
(AbstractYarnScheduler<SchedulerApplicationAttempt, SchedulerNode>) rm
.getResourceScheduler();
TestSchedulerUtils.verifyAppAddedAndRemovedFromScheduler(
fs.getSchedulerApplications(), fs, "queue");
}
// serviceInit must reject a configuration whose minimum allocation exceeds
// the maximum allocation, with the documented error prefix.
@Test
@Timeout(value = 30)
public void testConfValidation() throws Exception {
FifoScheduler scheduler = new FifoScheduler();
Configuration conf = new YarnConfiguration();
// min (2048) deliberately larger than max (1024).
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
try {
scheduler.serviceInit(conf);
fail("Exception is expected because the min memory allocation is"
+ " larger than the max memory allocation.");
} catch (YarnRuntimeException e) {
// Exception is expected.
assertTrue(e.getMessage().startsWith("Invalid resource scheduler memory"),
"The thrown exception is not the expected one.");
}
}
// Regression test: a request with only node- and rack-local entries (no
// off-switch/ANY entry) must not cause an NPE during node heartbeat.
@Test
@Timeout(value = 60)
public void testAllocateContainerOnNodeWithoutOffSwitchSpecified()
throws Exception {
GenericTestUtils.setRootLogLevel(Level.DEBUG);
MockRM rm = new MockRM(conf);
rm.start();
MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
RMApp app1 = MockRMAppSubmitter.submitWithMemory(2048, rm);
// kick the scheduling, 2 GB given to AM1, remaining 4GB on nm1
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
// add request for containers
List<ResourceRequest> requests = new ArrayList<ResourceRequest>();
requests.add(am1.createResourceReq("127.0.0.1", 1 * GB, 1, 1));
requests.add(am1.createResourceReq("/default-rack", 1 * GB, 1, 1));
am1.allocate(requests, null); // send the request
try {
// kick the schedule
nm1.nodeHeartbeat(true);
} catch (NullPointerException e) {
fail("NPE when allocating container on node but "
+ "forget to set off-switch request should be handled");
}
rm.stop();
}
// Full FIFO scheduling flow with two AMs on two nodes: allocation,
// node reports, and container-completion accounting.
// NOTE(review): the two polling while-loops have no iteration cap and rely
// solely on the 60s @Timeout to terminate — confirm acceptable.
@Test
@Timeout(value = 60)
public void testFifoScheduling() throws Exception {
GenericTestUtils.setRootLogLevel(Level.DEBUG);
MockRM rm = new MockRM(conf);
rm.start();
MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
MockNM nm2 = rm.registerNode("127.0.0.2:5678", 4 * GB);
RMApp app1 = MockRMAppSubmitter.submitWithMemory(2048, rm);
// kick the scheduling, 2 GB given to AM1, remaining 4GB on nm1
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
SchedulerNodeReport report_nm1 =
rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize());
RMApp app2 = MockRMAppSubmitter.submitWithMemory(2048, rm);
// kick the scheduling, 2GB given to AM, remaining 2 GB on nm2
nm2.nodeHeartbeat(true);
RMAppAttempt attempt2 = app2.getCurrentAppAttempt();
MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
am2.registerAppAttempt();
SchedulerNodeReport report_nm2 =
rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
assertEquals(2 * GB, report_nm2.getUsedResource().getMemorySize());
// add request for containers
am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, GB, 1, 1);
AllocateResponse alloc1Response = am1.schedule(); // send the request
// add request for containers
am2.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 0, 1);
AllocateResponse alloc2Response = am2.schedule(); // send the request
// kick the scheduler, 1 GB and 3 GB given to AM1 and AM2, remaining 0
nm1.nodeHeartbeat(true);
while (alloc1Response.getAllocatedContainers().size() < 1) {
LOG.info("Waiting for containers to be created for app 1...");
Thread.sleep(1000);
alloc1Response = am1.schedule();
}
while (alloc2Response.getAllocatedContainers().size() < 1) {
LOG.info("Waiting for containers to be created for app 2...");
Thread.sleep(1000);
alloc2Response = am2.schedule();
}
// kick the scheduler, nothing given remaining 2 GB.
nm2.nodeHeartbeat(true);
List<Container> allocated1 = alloc1Response.getAllocatedContainers();
assertEquals(1, allocated1.size());
assertEquals(1 * GB, allocated1.get(0).getResource().getMemorySize());
assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
List<Container> allocated2 = alloc2Response.getAllocatedContainers();
assertEquals(1, allocated2.size());
assertEquals(3 * GB, allocated2.get(0).getResource().getMemorySize());
assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId());
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
assertEquals(0, report_nm1.getAvailableResource().getMemorySize());
assertEquals(2 * GB, report_nm2.getAvailableResource().getMemorySize());
assertEquals(6 * GB, report_nm1.getUsedResource().getMemorySize());
assertEquals(2 * GB, report_nm2.getUsedResource().getMemorySize());
Container c1 = allocated1.get(0);
assertEquals(GB, c1.getResource().getMemorySize());
// Complete c1 and verify the attempt sees the finished container and
// nm1's used memory drops by 1 GB.
ContainerStatus containerStatus =
BuilderUtils.newContainerStatus(c1.getId(), ContainerState.COMPLETE,
"", 0, c1.getResource());
nm1.containerStatus(containerStatus);
int waitCount = 0;
while (attempt1.getJustFinishedContainers().size() < 1 && waitCount++ != 20) {
LOG.info("Waiting for containers to be finished for app 1... Tried "
+ waitCount + " times already..");
Thread.sleep(1000);
}
assertEquals(1, attempt1.getJustFinishedContainers().size());
assertEquals(1, am1.schedule().getCompletedContainersStatuses()
.size());
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
assertEquals(5 * GB, report_nm1.getUsedResource().getMemorySize());
rm.stop();
}
// A node update arriving after addApplication but before the attempt is
// added must be tolerated (historically an NPE site).
@Test
@Timeout(value = 60)
public void testNodeUpdateBeforeAppAttemptInit() throws Exception {
FifoScheduler scheduler = new FifoScheduler();
MockRM rm = new MockRM(conf);
scheduler.setRMContext(rm.getRMContext());
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf, rm.getRMContext());
RMNode node =
MockNodes.newNodeInfo(1, Resources.createResource(1024, 4), 1,
"127.0.0.1");
scheduler.handle(new NodeAddedSchedulerEvent(node));
ApplicationId appId = ApplicationId.newInstance(0, 1);
scheduler.addApplication(appId, "queue1", "user1", false, false);
// Update fires while the app has no attempt yet — must not NPE.
NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
try {
scheduler.handle(updateEvent);
} catch (NullPointerException e) {
fail();
}
ApplicationAttemptId attId = ApplicationAttemptId.newInstance(appId, 1);
scheduler.addApplicationAttempt(attId, false, false);
rm.stop();
}
// Submits an app asking for 'testAlloc' MB and asserts the scheduler rounds
// the allocation up to the configured minimum-allocation-mb.
private void testMinimumAllocation(YarnConfiguration conf, int testAlloc)
throws Exception {
MockRM rm = new MockRM(conf);
rm.start();
// Register node1
MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
// Submit an application
RMApp app1 = MockRMAppSubmitter.submitWithMemory(testAlloc, rm);
// kick the scheduling
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
SchedulerNodeReport report_nm1 =
rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
// The used resource must equal the configured minimum, not the ask.
int checkAlloc =
conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
assertEquals(checkAlloc, report_nm1.getUsedResource().getMemorySize());
rm.stop();
}
// An ask below the default minimum must be rounded up to the default minimum.
@Test
@Timeout(value = 60)
public void testDefaultMinimumAllocation() throws Exception {
// Test with something lesser than default
testMinimumAllocation(new YarnConfiguration(TestFifoScheduler.conf),
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB / 2);
}
// Same rounding check, but against a custom (non-default) minimum allocation.
@Test
@Timeout(value = 60)
public void testNonDefaultMinimumAllocation() throws Exception {
// Set custom min-alloc to test tweaking it
int allocMB = 1536;
YarnConfiguration conf = new YarnConfiguration(TestFifoScheduler.conf);
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, allocMB);
conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
allocMB * 10);
// Test for something lesser than this.
testMinimumAllocation(conf, allocMB / 2);
}
@Test
@Timeout(value = 50)
public void testReconnectedNode() throws Exception {
  // A node that re-registers with a smaller capacity (NM restart) must have
  // its old capacity removed before the new one is added, so that root-queue
  // metrics reflect the downgraded resources.
  CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
  QueuePath defaultQueuePath = new QueuePath("default");
  conf.setQueues(defaultQueuePath, new String[] {"default"});
  conf.setCapacity(defaultQueuePath, 100);
  FifoScheduler fs = new FifoScheduler();
  fs.init(conf);
  fs.start();
  // mock rmContext to avoid NPE.
  RMContext context = mock(RMContext.class);
  fs.reinitialize(conf, null);
  fs.setRMContext(context);
  RMNode n1 =
      MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, "127.0.0.2");
  RMNode n2 =
      MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2, "127.0.0.3");
  fs.handle(new NodeAddedSchedulerEvent(n1));
  fs.handle(new NodeAddedSchedulerEvent(n2));
  fs.handle(new NodeUpdateSchedulerEvent(n1));
  // 4 GB (n1) + 2 GB (n2) available before the reconnect.
  assertEquals(6 * GB, fs.getRootQueueMetrics().getAvailableMB());

  // reconnect n1 with downgraded memory
  n1 =
      MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 1, "127.0.0.2");
  fs.handle(new NodeRemovedSchedulerEvent(n1));
  fs.handle(new NodeAddedSchedulerEvent(n1));
  fs.handle(new NodeUpdateSchedulerEvent(n1));
  // 2 GB (downgraded n1) + 2 GB (n2) after the reconnect.
  assertEquals(4 * GB, fs.getRootQueueMetrics().getAvailableMB());

  fs.stop();
}
@Test
@Timeout(value = 50)
public void testBlackListNodes() throws Exception {
  // Verifies blacklist handling: container requests may only be satisfied by
  // nodes that are neither directly blacklisted (host_1_0) nor inside a
  // blacklisted rack (rack0 in the second phase).
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  FifoScheduler fs = (FifoScheduler) rm.getResourceScheduler();

  int rack_num_0 = 0;
  int rack_num_1 = 1;
  // Add 4 nodes in 2 racks
  // host_0_0 in rack0
  String host_0_0 = "127.0.0.1";
  RMNode n1 =
      MockNodes.newNodeInfo(rack_num_0, MockNodes.newResource(4 * GB), 1,
          host_0_0);
  fs.handle(new NodeAddedSchedulerEvent(n1));
  // host_0_1 in rack0
  String host_0_1 = "127.0.0.2";
  RMNode n2 =
      MockNodes.newNodeInfo(rack_num_0, MockNodes.newResource(4 * GB), 1,
          host_0_1);
  fs.handle(new NodeAddedSchedulerEvent(n2));
  // host_1_0 in rack1
  String host_1_0 = "127.0.0.3";
  RMNode n3 =
      MockNodes.newNodeInfo(rack_num_1, MockNodes.newResource(4 * GB), 1,
          host_1_0);
  fs.handle(new NodeAddedSchedulerEvent(n3));
  // host_1_1 in rack1
  String host_1_1 = "127.0.0.4";
  RMNode n4 =
      MockNodes.newNodeInfo(rack_num_1, MockNodes.newResource(4 * GB), 1,
          host_1_1);
  fs.handle(new NodeAddedSchedulerEvent(n4));

  // Add one application
  ApplicationId appId1 = BuilderUtils.newApplicationId(100, 1);
  ApplicationAttemptId appAttemptId1 =
      BuilderUtils.newApplicationAttemptId(appId1, 1);
  createMockRMApp(appAttemptId1, rm.getRMContext());
  SchedulerEvent appEvent =
      new AppAddedSchedulerEvent(appId1, "queue", "user");
  fs.handle(appEvent);
  SchedulerEvent attemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId1, false);
  fs.handle(attemptEvent);

  List<ContainerId> emptyId = new ArrayList<ContainerId>();
  List<ResourceRequest> emptyAsk = new ArrayList<ResourceRequest>();

  // Allow rack-locality for rack_1, but blacklist host_1_0
  // Set up resource requests
  // Ask for a 1 GB container for app 1
  List<ResourceRequest> ask1 = new ArrayList<ResourceRequest>();
  ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      "rack1", Resources.createResource(GB), 1,
      RMNodeLabelsManager.NO_LABEL));
  ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      ResourceRequest.ANY, Resources.createResource(GB), 1,
      RMNodeLabelsManager.NO_LABEL));
  fs.allocate(appAttemptId1, ask1, null, emptyId,
      Collections.singletonList(host_1_0), null, NULL_UPDATE_REQUESTS);

  // Trigger container assignment
  fs.handle(new NodeUpdateSchedulerEvent(n3));

  // Get the allocation for the application and verify no allocation on
  // blacklist node
  Allocation allocation1 =
      fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
          null, null, NULL_UPDATE_REQUESTS);
  assertEquals(0, allocation1.getContainers().size(), "allocation1");

  // verify host_1_1 can get allocated as not in blacklist
  fs.handle(new NodeUpdateSchedulerEvent(n4));
  Allocation allocation2 =
      fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
          null, null, NULL_UPDATE_REQUESTS);
  assertEquals(1, allocation2.getContainers().size(), "allocation2");
  List<Container> containerList = allocation2.getContainers();
  for (Container container : containerList) {
    assertEquals(container.getNodeId(), n4.getNodeID(),
        "Container is allocated on n4");
  }

  // Ask for a 1 GB container again for app 1
  List<ResourceRequest> ask2 = new ArrayList<ResourceRequest>();
  // this time, rack0 is also in blacklist, so only host_1_1 is available to
  // be assigned
  ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      ResourceRequest.ANY, Resources.createResource(GB), 1));
  fs.allocate(appAttemptId1, ask2, null, emptyId,
      Collections.singletonList("rack0"), null, NULL_UPDATE_REQUESTS);

  // verify n1 is not qualified to be allocated
  fs.handle(new NodeUpdateSchedulerEvent(n1));
  Allocation allocation3 =
      fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
          null, null, NULL_UPDATE_REQUESTS);
  assertEquals(0, allocation3.getContainers().size(), "allocation3");

  // verify n2 is not qualified to be allocated
  fs.handle(new NodeUpdateSchedulerEvent(n2));
  Allocation allocation4 =
      fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
          null, null, NULL_UPDATE_REQUESTS);
  assertEquals(0, allocation4.getContainers().size(), "allocation4");

  // verify n3 is not qualified to be allocated
  fs.handle(new NodeUpdateSchedulerEvent(n3));
  Allocation allocation5 =
      fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
          null, null, NULL_UPDATE_REQUESTS);
  assertEquals(0, allocation5.getContainers().size(), "allocation5");

  // n4 (host_1_1) is the only node outside both blacklists.
  fs.handle(new NodeUpdateSchedulerEvent(n4));
  Allocation allocation6 =
      fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
          null, null, NULL_UPDATE_REQUESTS);
  assertEquals(1, allocation6.getContainers().size(), "allocation6");

  containerList = allocation6.getContainers();
  for (Container container : containerList) {
    assertEquals(container.getNodeId(), n4.getNodeID(),
        "Container is allocated on n4");
  }
  rm.stop();
}
@Test
@Timeout(value = 50)
public void testHeadroom() throws Exception {
  // With a single 4 GB node, app1 gets 1 GB and app2 gets 2 GB; both apps
  // should then see the remaining 1 GB reported as headroom (resource limit)
  // in their subsequent allocate responses.
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  FifoScheduler fs = (FifoScheduler) rm.getResourceScheduler();

  // Add a node
  RMNode n1 =
      MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, "127.0.0.2");
  fs.handle(new NodeAddedSchedulerEvent(n1));

  // Add two applications
  ApplicationId appId1 = BuilderUtils.newApplicationId(100, 1);
  ApplicationAttemptId appAttemptId1 =
      BuilderUtils.newApplicationAttemptId(appId1, 1);
  createMockRMApp(appAttemptId1, rm.getRMContext());
  SchedulerEvent appEvent =
      new AppAddedSchedulerEvent(appId1, "queue", "user");
  fs.handle(appEvent);
  SchedulerEvent attemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId1, false);
  fs.handle(attemptEvent);

  ApplicationId appId2 = BuilderUtils.newApplicationId(200, 2);
  ApplicationAttemptId appAttemptId2 =
      BuilderUtils.newApplicationAttemptId(appId2, 1);
  createMockRMApp(appAttemptId2, rm.getRMContext());
  SchedulerEvent appEvent2 =
      new AppAddedSchedulerEvent(appId2, "queue", "user");
  fs.handle(appEvent2);
  SchedulerEvent attemptEvent2 =
      new AppAttemptAddedSchedulerEvent(appAttemptId2, false);
  fs.handle(attemptEvent2);

  List<ContainerId> emptyId = new ArrayList<ContainerId>();
  List<ResourceRequest> emptyAsk = new ArrayList<ResourceRequest>();

  // Set up resource requests
  // Ask for a 1 GB container for app 1
  List<ResourceRequest> ask1 = new ArrayList<ResourceRequest>();
  ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      ResourceRequest.ANY, Resources.createResource(GB), 1));
  fs.allocate(appAttemptId1, ask1, null, emptyId,
      null, null, NULL_UPDATE_REQUESTS);

  // Ask for a 2 GB container for app 2
  List<ResourceRequest> ask2 = new ArrayList<ResourceRequest>();
  ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      ResourceRequest.ANY, Resources.createResource(2 * GB), 1));
  fs.allocate(appAttemptId2, ask2, null, emptyId,
      null, null, NULL_UPDATE_REQUESTS);

  // Trigger container assignment
  fs.handle(new NodeUpdateSchedulerEvent(n1));

  // Get the allocation for the applications and verify headroom
  Allocation allocation1 =
      fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
          null, null, NULL_UPDATE_REQUESTS);
  assertEquals(1 * GB, allocation1
      .getResourceLimit().getMemorySize(), "Allocation headroom");

  Allocation allocation2 =
      fs.allocate(appAttemptId2, emptyAsk, null, emptyId,
          null, null, NULL_UPDATE_REQUESTS);
  assertEquals(1 * GB, allocation2
      .getResourceLimit().getMemorySize(), "Allocation headroom");

  rm.stop();
}
@Test
@Timeout(value = 60)
public void testResourceOverCommit() throws Exception {
  // Shrinks a fully-used node from 4 GB to 2 GB via the admin API and checks
  // that the node goes into over-commit (negative available resource) without
  // killing running containers, and recovers once a container completes.
  int waitCount;

  MockRM rm = new MockRM(conf);
  rm.start();

  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);
  RMApp app1 = MockRMAppSubmitter.submitWithMemory(2048, rm);
  // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 =
      rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 2 GB used and 2 GB available
  assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize());
  assertEquals(2 * GB, report_nm1.getAvailableResource().getMemorySize());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request

  // kick the scheduler, 2 GB given to AM1, resource remaining 0
  nm1.nodeHeartbeat(true);
  // Allocation is asynchronous; poll until the container shows up.
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(1000);
    alloc1Response = am1.schedule();
  }

  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  assertEquals(1, allocated1.size());
  assertEquals(2 * GB, allocated1.get(0).getResource().getMemorySize());
  assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());

  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 4 GB used and 0 GB available
  assertEquals(0, report_nm1.getAvailableResource().getMemorySize());
  assertEquals(4 * GB, report_nm1.getUsedResource().getMemorySize());

  // check container is assigned with 2 GB.
  Container c1 = allocated1.get(0);
  assertEquals(2 * GB, c1.getResource().getMemorySize());

  // update node resource to 2 GB, so resource is over-consumed.
  Map<NodeId, ResourceOption> nodeResourceMap =
      new HashMap<NodeId, ResourceOption>();
  nodeResourceMap.put(nm1.getNodeId(),
      ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1));
  UpdateNodeResourceRequest request =
      UpdateNodeResourceRequest.newInstance(nodeResourceMap);
  rm.getAdminService().updateNodeResource(request);

  // Poll (up to ~20s) for the resource-update event to be processed; the
  // available resource becomes non-zero (negative) once it is.
  waitCount = 0;
  while (waitCount++ != 20) {
    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    if (null != report_nm1 &&
        report_nm1.getAvailableResource().getMemorySize() != 0) {
      break;
    }
    LOG.info("Waiting for RMNodeResourceUpdateEvent to be handled... Tried "
        + waitCount + " times already..");
    Thread.sleep(1000);
  }
  // Now, the used resource is still 4 GB, and available resource is minus
  // value.
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  assertEquals(4 * GB, report_nm1.getUsedResource().getMemorySize());
  assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemorySize());

  // Check container can complete successfully in case of resource
  // over-commitment.
  ContainerStatus containerStatus =
      BuilderUtils.newContainerStatus(c1.getId(), ContainerState.COMPLETE,
          "", 0, c1.getResource());
  nm1.containerStatus(containerStatus);
  waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1 && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(100);
  }
  assertEquals(1, attempt1.getJustFinishedContainers().size());
  assertEquals(1, am1.schedule().getCompletedContainersStatuses()
      .size());
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize());
  // As container return 2 GB back, the available resource becomes 0 again.
  assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize());
  rm.stop();
}
@Test
public void testRemovedNodeDecomissioningNode() throws Exception {
  // Regression test: a NODE_UPDATE for a DECOMMISSIONING node that has
  // already been removed from the scheduler's node tracker (remove/heartbeat
  // race) must be handled without throwing.
  NodeStatus mockNodeStatus = createMockNodeStatus();
  // Register nodemanager
  NodeManager nm = registerNode("host_decom", 1234, 2345,
      NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4),
      mockNodeStatus);

  RMNode node =
      resourceManager.getRMContext().getRMNodes().get(nm.getNodeId());
  // Send a heartbeat to kick the tires on the Scheduler
  NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node);
  resourceManager.getResourceScheduler().handle(nodeUpdate);

  // Force remove the node to simulate race condition
  ((FifoScheduler) resourceManager.getResourceScheduler())
      .getNodeTracker().removeNode(nm.getNodeId());
  // Kick off another heartbeat with the node state mocked to decommissioning
  RMNode spyNode =
      spy(resourceManager.getRMContext().getRMNodes()
          .get(nm.getNodeId()));
  when(spyNode.getState()).thenReturn(NodeState.DECOMMISSIONING);
  // Test passes if this does not throw (no explicit assertion needed).
  resourceManager.getResourceScheduler().handle(
      new NodeUpdateSchedulerEvent(spyNode));
}
@Test
public void testResourceUpdateDecommissioningNode() throws Exception {
  // Verifies that when a node transitions to DECOMMISSIONING, the scheduler
  // shrinks its total resource down to what is currently allocated (1 GB /
  // 1 core here), leaving zero available, and that a redundant update is
  // skipped on the next heartbeat.
  // Mock the RMNodeResourceUpdate event handler to update SchedulerNode
  // to have 0 available resource
  RMContext spyContext = spy(resourceManager.getRMContext());
  Dispatcher mockDispatcher = mock(AsyncDispatcher.class);
  when(mockDispatcher.getEventHandler()).thenReturn(new EventHandler<Event>() {
    @Override
    public void handle(Event event) {
      if (event instanceof RMNodeResourceUpdateEvent) {
        RMNodeResourceUpdateEvent resourceEvent =
            (RMNodeResourceUpdateEvent) event;
        resourceManager
            .getResourceScheduler()
            .getSchedulerNode(resourceEvent.getNodeId())
            .updateTotalResource(resourceEvent.getResourceOption().getResource());
      }
    }
  });
  doReturn(mockDispatcher).when(spyContext).getDispatcher();
  ((FifoScheduler) resourceManager.getResourceScheduler())
      .setRMContext(spyContext);
  ((AsyncDispatcher) mockDispatcher).start();

  NodeStatus mockNodeStatus = createMockNodeStatus();
  // Register node
  String host_0 = "host_0";
  NodeManager nm_0 = registerNode(host_0, 1234, 2345,
      NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4),
      mockNodeStatus);
  // ResourceRequest priorities
  Priority priority_0 = Priority.newInstance(0);

  // Submit an application
  Application application_0 =
      new Application("user_0", "a1", resourceManager);
  application_0.submit();
  application_0.addNodeManager(host_0, 1234, nm_0);

  Resource capability_0_0 = Resources.createResource(1 * GB, 1);
  application_0.addResourceRequestSpec(priority_0, capability_0_0);

  Task task_0_0 =
      new Task(application_0, priority_0, new String[] { host_0 });
  application_0.addTask(task_0_0);

  // Send resource requests to the scheduler
  application_0.schedule();

  RMNode node =
      resourceManager.getRMContext().getRMNodes().get(nm_0.getNodeId());
  // Send a heartbeat to kick the tires on the Scheduler
  NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node);
  resourceManager.getResourceScheduler().handle(nodeUpdate);

  // Kick off another heartbeat with the node state mocked to decommissioning
  // This should update the schedulernodes to have 0 available resource
  RMNode spyNode =
      spy(resourceManager.getRMContext().getRMNodes()
          .get(nm_0.getNodeId()));
  when(spyNode.getState()).thenReturn(NodeState.DECOMMISSIONING);
  resourceManager.getResourceScheduler().handle(
      new NodeUpdateSchedulerEvent(spyNode));

  // Get allocations from the scheduler
  application_0.schedule();

  // Check the used resource is 1 GB 1 core
  // assertEquals(1 * GB, nm_0.getUsed().getMemory());
  Resource usedResource =
      resourceManager.getResourceScheduler()
          .getSchedulerNode(nm_0.getNodeId()).getAllocatedResource();
  assertThat(usedResource.getMemorySize()).isEqualTo(1 * GB);
  assertThat(usedResource.getVirtualCores()).isEqualTo(1);
  // Check total resource of scheduler node is also changed to 1 GB 1 core
  Resource totalResource =
      resourceManager.getResourceScheduler()
          .getSchedulerNode(nm_0.getNodeId()).getTotalResource();
  assertThat(totalResource.getMemorySize()).isEqualTo(1 * GB);
  assertThat(totalResource.getVirtualCores()).isEqualTo(1);
  // Check the available resource is 0/0
  Resource availableResource =
      resourceManager.getResourceScheduler()
          .getSchedulerNode(nm_0.getNodeId()).getUnallocatedResource();
  assertThat(availableResource.getMemorySize()).isEqualTo(0);
  assertThat(availableResource.getVirtualCores()).isEqualTo(0);
  // Kick off another heartbeat where the RMNodeResourceUpdateEvent would
  // be skipped for DECOMMISSIONING state since the total resource is
  // already equal to used resource from the previous heartbeat.
  when(spyNode.getState()).thenReturn(NodeState.DECOMMISSIONING);
  resourceManager.getResourceScheduler().handle(
      new NodeUpdateSchedulerEvent(spyNode));
  verify(mockDispatcher, times(4)).getEventHandler();
}
private void checkApplicationResourceUsage(int expected,
    Application application) {
  // Assert the application's accumulated memory usage matches expectation.
  long usedMemory = application.getUsedResources().getMemorySize();
  assertEquals(expected, usedMemory);
}
private void checkNodeResourceUsage(int expected,
    org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) {
  // Assert the node's reported memory usage, then let the mock NodeManager
  // cross-check its own internal bookkeeping.
  long usedMemory = node.getUsed().getMemorySize();
  assertEquals(expected, usedMemory);
  node.checkResourceUsage();
}
public static void main(String[] arg) throws Exception {
  // Allow running the basic FIFO scheduler test standalone from the command
  // line, mimicking the JUnit lifecycle: setUp, test, tearDown.
  TestFifoScheduler suite = new TestFifoScheduler();
  suite.setUp();
  suite.testFifoScheduler();
  suite.tearDown();
}
private RMAppImpl createMockRMApp(ApplicationAttemptId attemptId,
    RMContext context) {
  // Build a minimal mocked RMApp/RMAppAttempt pair and register it with the
  // given RMContext so scheduler events can resolve the application.
  RMAppAttemptMetrics attemptMetrics = mock(RMAppAttemptMetrics.class);
  ApplicationSubmissionContext submissionContext =
      mock(ApplicationSubmissionContext.class);
  when(submissionContext.getUnmanagedAM()).thenReturn(false);

  RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
  when(attempt.getAppAttemptId()).thenReturn(attemptId);
  when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetrics);
  when(attempt.getSubmissionContext()).thenReturn(submissionContext);

  RMAppImpl app = mock(RMAppImpl.class);
  when(app.getApplicationId()).thenReturn(attemptId.getApplicationId());
  when(app.getCurrentAppAttempt()).thenReturn(attempt);

  context.getRMApps().putIfAbsent(attemptId.getApplicationId(), app);
  return app;
}
}
|
TestFifoScheduler
|
java
|
google__error-prone
|
check_api/src/test/java/com/google/errorprone/util/testdata/TargetTypeTest.java
|
{
"start": 16109,
"end": 16129
}
|
enum ____ {}
|
ThisEnum
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/proxy/DetachedProxyAsQueryParameterTest.java
|
{
"start": 3240,
"end": 3542
}
|
class ____ extends Department {
private String name;
public BasicDepartment() {
}
public BasicDepartment(Integer id, String name) {
super( id );
this.name = name;
}
public String getName() {
return name;
}
}
@Entity(name = "SpecialDepartment")
public static
|
BasicDepartment
|
java
|
micronaut-projects__micronaut-core
|
http-client-core/src/main/java/io/micronaut/http/client/annotation/Client.java
|
{
"start": 5073,
"end": 5171
}
|
interface ____ type.
*/
CLIENT,
/**
* Server (controller)
|
definition
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/DebeziumMongodbComponentBuilderFactory.java
|
{
"start": 8847,
"end": 57087
}
|
class ____ is responsible for persistence of
* connector offsets.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: org.apache.kafka.connect.storage.FileOffsetBackingStore
* Group: consumer
*
* @param offsetStorage the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder offsetStorage(java.lang.String offsetStorage) {
doSetProperty("offsetStorage", offsetStorage);
return this;
}
/**
* Path to file where offsets are to be stored. Required when
* offset.storage is set to the FileOffsetBackingStore.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param offsetStorageFileName the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder offsetStorageFileName(java.lang.String offsetStorageFileName) {
doSetProperty("offsetStorageFileName", offsetStorageFileName);
return this;
}
/**
* The number of partitions used when creating the offset storage topic.
* Required when offset.storage is set to the 'KafkaOffsetBackingStore'.
*
* The option is a: <code>int</code> type.
*
* Group: consumer
*
* @param offsetStoragePartitions the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder offsetStoragePartitions(int offsetStoragePartitions) {
doSetProperty("offsetStoragePartitions", offsetStoragePartitions);
return this;
}
/**
* Replication factor used when creating the offset storage topic.
* Required when offset.storage is set to the KafkaOffsetBackingStore.
*
* The option is a: <code>int</code> type.
*
* Group: consumer
*
* @param offsetStorageReplicationFactor the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder offsetStorageReplicationFactor(int offsetStorageReplicationFactor) {
doSetProperty("offsetStorageReplicationFactor", offsetStorageReplicationFactor);
return this;
}
/**
* The name of the Kafka topic where offsets are to be stored. Required
* when offset.storage is set to the KafkaOffsetBackingStore.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param offsetStorageTopic the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder offsetStorageTopic(java.lang.String offsetStorageTopic) {
doSetProperty("offsetStorageTopic", offsetStorageTopic);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
/**
* The method used to capture changes from MongoDB server. Options
* include: 'change_streams' to capture changes via MongoDB Change
* Streams, update events do not contain full documents;
* 'change_streams_update_full' (the default) to capture changes via
* MongoDB Change Streams, update events contain full documents.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: change_streams_update_full
* Group: mongodb
*
* @param captureMode the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder captureMode(java.lang.String captureMode) {
doSetProperty("captureMode", captureMode);
return this;
}
/**
* A comma-separated list of regular expressions or literals that match
* the collection names for which changes are to be excluded.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param collectionExcludeList the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder collectionExcludeList(java.lang.String collectionExcludeList) {
doSetProperty("collectionExcludeList", collectionExcludeList);
return this;
}
/**
* A comma-separated list of regular expressions or literals that match
* the collection names for which changes are to be captured.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param collectionIncludeList the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder collectionIncludeList(java.lang.String collectionIncludeList) {
doSetProperty("collectionIncludeList", collectionIncludeList);
return this;
}
/**
* The maximum time in milliseconds to wait for connection validation to
* complete. Defaults to 60 seconds.
*
* The option is a: <code>long</code> type.
*
* Default: 1m
* Group: mongodb
*
* @param connectionValidationTimeoutMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder connectionValidationTimeoutMs(long connectionValidationTimeoutMs) {
doSetProperty("connectionValidationTimeoutMs", connectionValidationTimeoutMs);
return this;
}
/**
* Optional list of custom converters that would be used instead of
* default ones. The converters are defined using '.type' config option
* and configured using options '.'.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param converters the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder converters(java.lang.String converters) {
doSetProperty("converters", converters);
return this;
}
/**
* The maximum processing time in milliseconds to wait for the oplog
* cursor to process a single poll request.
*
* The option is a: <code>int</code> type.
*
* Group: mongodb
*
* @param cursorMaxAwaitTimeMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder cursorMaxAwaitTimeMs(int cursorMaxAwaitTimeMs) {
doSetProperty("cursorMaxAwaitTimeMs", cursorMaxAwaitTimeMs);
return this;
}
/**
* The custom metric tags will accept key-value pairs to customize the
* MBean object name which should be appended the end of regular name,
* each key would represent a tag for the MBean object name, and the
* corresponding value would be the value of that tag the key is. For
* example: k1=v1,k2=v2.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param customMetricTags the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder customMetricTags(java.lang.String customMetricTags) {
doSetProperty("customMetricTags", customMetricTags);
return this;
}
/**
* A comma-separated list of regular expressions or literals that match
* the database names for which changes are to be excluded.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param databaseExcludeList the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder databaseExcludeList(java.lang.String databaseExcludeList) {
doSetProperty("databaseExcludeList", databaseExcludeList);
return this;
}
/**
* A comma-separated list of regular expressions or literals that match
* the database names for which changes are to be captured.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param databaseIncludeList the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder databaseIncludeList(java.lang.String databaseIncludeList) {
doSetProperty("databaseIncludeList", databaseIncludeList);
return this;
}
/**
* The maximum number of retries on connection errors before failing (-1
* = no limit, 0 = disabled, 0 = num of retries).
*
* The option is a: <code>int</code> type.
*
* Default: -1
* Group: mongodb
*
* @param errorsMaxRetries the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder errorsMaxRetries(int errorsMaxRetries) {
doSetProperty("errorsMaxRetries", errorsMaxRetries);
return this;
}
/**
* Specify how failures during processing of events (i.e. when
* encountering a corrupted event) should be handled, including: 'fail'
* (the default) an exception indicating the problematic event and its
* position is raised, causing the connector to be stopped; 'warn' the
* problematic event and its position will be logged and the event will
* be skipped; 'ignore' the problematic event will be skipped.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: fail
* Group: mongodb
*
* @param eventProcessingFailureHandlingMode the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder eventProcessingFailureHandlingMode(java.lang.String eventProcessingFailureHandlingMode) {
doSetProperty("eventProcessingFailureHandlingMode", eventProcessingFailureHandlingMode);
return this;
}
/**
* The maximum time in milliseconds to wait for task executor to shut
* down.
*
* The option is a: <code>long</code> type.
*
* Default: 4s
* Group: mongodb
*
* @param executorShutdownTimeoutMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder executorShutdownTimeoutMs(long executorShutdownTimeoutMs) {
doSetProperty("executorShutdownTimeoutMs", executorShutdownTimeoutMs);
return this;
}
/**
* Enable/Disable Debezium context headers that provides essential
* metadata for tracking and identifying the source of CDC events in
* downstream processing systems.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: mongodb
*
* @param extendedHeadersEnabled the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder extendedHeadersEnabled(boolean extendedHeadersEnabled) {
doSetProperty("extendedHeadersEnabled", extendedHeadersEnabled);
return this;
}
/**
* A comma-separated list of the fully-qualified names of fields that
* should be excluded from change event message values.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param fieldExcludeList the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder fieldExcludeList(java.lang.String fieldExcludeList) {
doSetProperty("fieldExcludeList", fieldExcludeList);
return this;
}
/**
* A comma-separated list of the fully-qualified replacements of fields
* that should be used to rename fields in change event message values.
* Fully-qualified replacements for fields are of the form
* databaseName.collectionName.fieldName.nestedFieldName:newNestedFieldName, where databaseName and collectionName may contain the wildcard () which matches any characters, the colon character (:) is used to determine rename mapping of field.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param fieldRenames the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder fieldRenames(java.lang.String fieldRenames) {
doSetProperty("fieldRenames", fieldRenames);
return this;
}
/**
* Specify the action to take when a guardrail collections limit is
* exceeded: 'warn' (the default) logs a warning message and continues
* processing; 'fail' stops the connector with an error.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: warn
* Group: mongodb
*
* @param guardrailCollectionsLimitAction the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder guardrailCollectionsLimitAction(java.lang.String guardrailCollectionsLimitAction) {
doSetProperty("guardrailCollectionsLimitAction", guardrailCollectionsLimitAction);
return this;
}
/**
* The maximum number of collections or tables that can be captured by
* the connector. When this limit is exceeded, the action specified by
* 'guardrail.collections.limit.action' will be taken. Set to 0 to
* disable this guardrail.
*
* The option is a: <code>int</code> type.
*
* Group: mongodb
*
* @param guardrailCollectionsMax the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder guardrailCollectionsMax(int guardrailCollectionsMax) {
doSetProperty("guardrailCollectionsMax", guardrailCollectionsMax);
return this;
}
/**
* Length of an interval in milli-seconds in in which the connector
* periodically sends heartbeat messages to a heartbeat topic. Use 0 to
* disable heartbeat messages. Disabled by default.
*
* The option is a: <code>int</code> type.
*
* Default: 0ms
* Group: mongodb
*
* @param heartbeatIntervalMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder heartbeatIntervalMs(int heartbeatIntervalMs) {
doSetProperty("heartbeatIntervalMs", heartbeatIntervalMs);
return this;
}
/**
* The prefix that is used to name heartbeat topics.Defaults to
* __debezium-heartbeat.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: __debezium-heartbeat
* Group: mongodb
*
* @param heartbeatTopicsPrefix the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder heartbeatTopicsPrefix(java.lang.String heartbeatTopicsPrefix) {
doSetProperty("heartbeatTopicsPrefix", heartbeatTopicsPrefix);
return this;
}
/**
* Specify the strategy used for watermarking during an incremental
* snapshot: 'insert_insert' both open and close signal is written into
* signal data collection (default); 'insert_delete' only open signal is
* written on signal data collection, the close will delete the relative
* open signal;.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: INSERT_INSERT
* Group: mongodb
*
* @param incrementalSnapshotWatermarkingStrategy the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder incrementalSnapshotWatermarkingStrategy(java.lang.String incrementalSnapshotWatermarkingStrategy) {
doSetProperty("incrementalSnapshotWatermarkingStrategy", incrementalSnapshotWatermarkingStrategy);
return this;
}
/**
* Maximum size of each batch of source records. Defaults to 2048.
*
* The option is a: <code>int</code> type.
*
* Default: 2048
* Group: mongodb
*
* @param maxBatchSize the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder maxBatchSize(int maxBatchSize) {
doSetProperty("maxBatchSize", maxBatchSize);
return this;
}
/**
* Maximum size of the queue for change events read from the database
* log but not yet recorded or forwarded. Defaults to 8192, and should
* always be larger than the maximum batch size.
*
* The option is a: <code>int</code> type.
*
* Default: 8192
* Group: mongodb
*
* @param maxQueueSize the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder maxQueueSize(int maxQueueSize) {
doSetProperty("maxQueueSize", maxQueueSize);
return this;
}
/**
* Maximum size of the queue in bytes for change events read from the
* database log but not yet recorded or forwarded. Defaults to 0. Mean
* the feature is not enabled.
*
* The option is a: <code>long</code> type.
*
* Group: mongodb
*
* @param maxQueueSizeInBytes the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder maxQueueSizeInBytes(long maxQueueSizeInBytes) {
doSetProperty("maxQueueSizeInBytes", maxQueueSizeInBytes);
return this;
}
/**
* Database containing user credentials.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: admin
* Group: mongodb
*
* @param mongodbAuthsource the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder mongodbAuthsource(java.lang.String mongodbAuthsource) {
doSetProperty("mongodbAuthsource", mongodbAuthsource);
return this;
}
/**
* Database connection string.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param mongodbConnectionString the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder mongodbConnectionString(java.lang.String mongodbConnectionString) {
doSetProperty("mongodbConnectionString", mongodbConnectionString);
return this;
}
/**
* The connection timeout, given in milliseconds. Defaults to 10 seconds
* (10,000 ms).
*
* The option is a: <code>int</code> type.
*
* Default: 10s
* Group: mongodb
*
* @param mongodbConnectTimeoutMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder mongodbConnectTimeoutMs(int mongodbConnectTimeoutMs) {
doSetProperty("mongodbConnectTimeoutMs", mongodbConnectTimeoutMs);
return this;
}
/**
* The frequency that the cluster monitor attempts to reach each server.
* Defaults to 10 seconds (10,000 ms).
*
* The option is a: <code>int</code> type.
*
* Default: 10s
* Group: mongodb
*
* @param mongodbHeartbeatFrequencyMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder mongodbHeartbeatFrequencyMs(int mongodbHeartbeatFrequencyMs) {
doSetProperty("mongodbHeartbeatFrequencyMs", mongodbHeartbeatFrequencyMs);
return this;
}
/**
* Password to be used when connecting to MongoDB, if necessary.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param mongodbPassword the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder mongodbPassword(java.lang.String mongodbPassword) {
doSetProperty("mongodbPassword", mongodbPassword);
return this;
}
/**
* Interval for looking for new, removed, or changed replica sets, given
* in milliseconds. Defaults to 30 seconds (30,000 ms).
*
* The option is a: <code>long</code> type.
*
* Default: 30s
* Group: mongodb
*
* @param mongodbPollIntervalMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder mongodbPollIntervalMs(long mongodbPollIntervalMs) {
doSetProperty("mongodbPollIntervalMs", mongodbPollIntervalMs);
return this;
}
/**
* The server selection timeout, given in milliseconds. Defaults to 10
* seconds (10,000 ms).
*
* The option is a: <code>int</code> type.
*
* Default: 30s
* Group: mongodb
*
* @param mongodbServerSelectionTimeoutMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder mongodbServerSelectionTimeoutMs(int mongodbServerSelectionTimeoutMs) {
doSetProperty("mongodbServerSelectionTimeoutMs", mongodbServerSelectionTimeoutMs);
return this;
}
/**
* The socket timeout, given in milliseconds. Defaults to 0 ms.
*
* The option is a: <code>int</code> type.
*
* Default: 0ms
* Group: mongodb
*
* @param mongodbSocketTimeoutMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder mongodbSocketTimeoutMs(int mongodbSocketTimeoutMs) {
doSetProperty("mongodbSocketTimeoutMs", mongodbSocketTimeoutMs);
return this;
}
/**
* Should connector use SSL to connect to MongoDB instances.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: mongodb
*
* @param mongodbSslEnabled the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder mongodbSslEnabled(boolean mongodbSslEnabled) {
doSetProperty("mongodbSslEnabled", mongodbSslEnabled);
return this;
}
/**
* Whether invalid host names are allowed when using SSL. If true the
* connection will not prevent man-in-the-middle attacks.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: mongodb
*
* @param mongodbSslInvalidHostnameAllowed the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder mongodbSslInvalidHostnameAllowed(boolean mongodbSslInvalidHostnameAllowed) {
doSetProperty("mongodbSslInvalidHostnameAllowed", mongodbSslInvalidHostnameAllowed);
return this;
}
/**
* Database user for connecting to MongoDB, if necessary.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param mongodbUser the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder mongodbUser(java.lang.String mongodbUser) {
doSetProperty("mongodbUser", mongodbUser);
return this;
}
/**
* List of notification channels names that are enabled.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param notificationEnabledChannels the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder notificationEnabledChannels(java.lang.String notificationEnabledChannels) {
doSetProperty("notificationEnabledChannels", notificationEnabledChannels);
return this;
}
/**
* The name of the topic for the notifications. This is required in case
* 'sink' is in the list of enabled channels.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param notificationSinkTopicName the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder notificationSinkTopicName(java.lang.String notificationSinkTopicName) {
doSetProperty("notificationSinkTopicName", notificationSinkTopicName);
return this;
}
/**
* Path to OpenLineage file configuration. See
* https://openlineage.io/docs/client/java/configuration.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: ./openlineage.yml
* Group: mongodb
*
* @param openlineageIntegrationConfigFilePath the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder openlineageIntegrationConfigFilePath(java.lang.String openlineageIntegrationConfigFilePath) {
doSetProperty("openlineageIntegrationConfigFilePath", openlineageIntegrationConfigFilePath);
return this;
}
/**
* The Kafka bootstrap server address used as input/output namespace/.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param openlineageIntegrationDatasetKafkaBootstrapServers the value
* to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder openlineageIntegrationDatasetKafkaBootstrapServers(java.lang.String openlineageIntegrationDatasetKafkaBootstrapServers) {
doSetProperty("openlineageIntegrationDatasetKafkaBootstrapServers", openlineageIntegrationDatasetKafkaBootstrapServers);
return this;
}
/**
* Enable Debezium to emit data lineage metadata through OpenLineage
* API.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: mongodb
*
* @param openlineageIntegrationEnabled the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder openlineageIntegrationEnabled(boolean openlineageIntegrationEnabled) {
doSetProperty("openlineageIntegrationEnabled", openlineageIntegrationEnabled);
return this;
}
/**
* The job's description emitted by Debezium.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: Debezium change data capture job
* Group: mongodb
*
* @param openlineageIntegrationJobDescription the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder openlineageIntegrationJobDescription(java.lang.String openlineageIntegrationJobDescription) {
doSetProperty("openlineageIntegrationJobDescription", openlineageIntegrationJobDescription);
return this;
}
/**
* The job's namespace emitted by Debezium.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param openlineageIntegrationJobNamespace the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder openlineageIntegrationJobNamespace(java.lang.String openlineageIntegrationJobNamespace) {
doSetProperty("openlineageIntegrationJobNamespace", openlineageIntegrationJobNamespace);
return this;
}
/**
* The job's owners emitted by Debezium. A comma-separated list of
* key-value pairs.For example: k1=v1,k2=v2.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param openlineageIntegrationJobOwners the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder openlineageIntegrationJobOwners(java.lang.String openlineageIntegrationJobOwners) {
doSetProperty("openlineageIntegrationJobOwners", openlineageIntegrationJobOwners);
return this;
}
/**
* The job's tags emitted by Debezium. A comma-separated list of
* key-value pairs.For example: k1=v1,k2=v2.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param openlineageIntegrationJobTags the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder openlineageIntegrationJobTags(java.lang.String openlineageIntegrationJobTags) {
doSetProperty("openlineageIntegrationJobTags", openlineageIntegrationJobTags);
return this;
}
/**
* Time to wait for new change events to appear after receiving no
* events, given in milliseconds. Defaults to 500 ms.
*
* The option is a: <code>long</code> type.
*
* Default: 500ms
* Group: mongodb
*
* @param pollIntervalMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder pollIntervalMs(long pollIntervalMs) {
doSetProperty("pollIntervalMs", pollIntervalMs);
return this;
}
/**
* Optional list of post processors. The processors are defined using
* '.type' config option and configured using options ''.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param postProcessors the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder postProcessors(java.lang.String postProcessors) {
doSetProperty("postProcessors", postProcessors);
return this;
}
/**
* Enables transaction metadata extraction together with event counting.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: mongodb
*
* @param provideTransactionMetadata the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder provideTransactionMetadata(boolean provideTransactionMetadata) {
doSetProperty("provideTransactionMetadata", provideTransactionMetadata);
return this;
}
/**
* The maximum number of records that should be loaded into memory while
* streaming. A value of '0' uses the default JDBC fetch size.
*
* The option is a: <code>int</code> type.
*
* Group: mongodb
*
* @param queryFetchSize the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder queryFetchSize(int queryFetchSize) {
doSetProperty("queryFetchSize", queryFetchSize);
return this;
}
/**
* Time to wait before restarting connector after retriable exception
* occurs. Defaults to 10000ms.
*
* The option is a: <code>long</code> type.
*
* Default: 10s
* Group: mongodb
*
* @param retriableRestartConnectorWaitMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder retriableRestartConnectorWaitMs(long retriableRestartConnectorWaitMs) {
doSetProperty("retriableRestartConnectorWaitMs", retriableRestartConnectorWaitMs);
return this;
}
/**
* The path to the file that will be used to record the database schema
* history.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param schemaHistoryInternalFileFilename the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder schemaHistoryInternalFileFilename(java.lang.String schemaHistoryInternalFileFilename) {
doSetProperty("schemaHistoryInternalFileFilename", schemaHistoryInternalFileFilename);
return this;
}
/**
* Specify how schema names should be adjusted for compatibility with
* the message converter used by the connector, including: 'avro'
* replaces the characters that cannot be used in the Avro type name
* with underscore; 'avro_unicode' replaces the underscore or characters
* that cannot be used in the Avro type name with corresponding unicode
* like _uxxxx. Note: _ is an escape sequence like backslash in
* Java;'none' does not apply any adjustment (default).
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: none
* Group: mongodb
*
* @param schemaNameAdjustmentMode the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder schemaNameAdjustmentMode(java.lang.String schemaNameAdjustmentMode) {
doSetProperty("schemaNameAdjustmentMode", schemaNameAdjustmentMode);
return this;
}
/**
* The name of the data collection that is used to send signals/commands
* to Debezium. Signaling is disabled when not set.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param signalDataCollection the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder signalDataCollection(java.lang.String signalDataCollection) {
doSetProperty("signalDataCollection", signalDataCollection);
return this;
}
/**
* List of channels names that are enabled. Source channel is enabled by
* default.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: source
* Group: mongodb
*
* @param signalEnabledChannels the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder signalEnabledChannels(java.lang.String signalEnabledChannels) {
doSetProperty("signalEnabledChannels", signalEnabledChannels);
return this;
}
/**
* Interval for looking for new signals in registered channels, given in
* milliseconds. Defaults to 5 seconds.
*
* The option is a: <code>long</code> type.
*
* Default: 5s
* Group: mongodb
*
* @param signalPollIntervalMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder signalPollIntervalMs(long signalPollIntervalMs) {
doSetProperty("signalPollIntervalMs", signalPollIntervalMs);
return this;
}
/**
* The comma-separated list of operations to skip during streaming,
* defined as: 'c' for inserts/create; 'u' for updates; 'd' for deletes,
* 't' for truncates, and 'none' to indicate nothing skipped. By
* default, only truncate operations will be skipped.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: t
* Group: mongodb
*
* @param skippedOperations the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder skippedOperations(java.lang.String skippedOperations) {
doSetProperty("skippedOperations", skippedOperations);
return this;
}
/**
* This property contains a comma-separated list of ., for which the
* initial snapshot may be a subset of data present in the data source.
* The subset would be defined by mongodb filter query specified as
* value for property snapshot.collection.filter.override..
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param snapshotCollectionFilterOverrides the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder snapshotCollectionFilterOverrides(java.lang.String snapshotCollectionFilterOverrides) {
doSetProperty("snapshotCollectionFilterOverrides", snapshotCollectionFilterOverrides);
return this;
}
/**
* A delay period before a snapshot will begin, given in milliseconds.
* Defaults to 0 ms.
*
* The option is a: <code>long</code> type.
*
* Default: 0ms
* Group: mongodb
*
* @param snapshotDelayMs the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder snapshotDelayMs(long snapshotDelayMs) {
doSetProperty("snapshotDelayMs", snapshotDelayMs);
return this;
}
/**
* The maximum number of records that should be loaded into memory while
* performing a snapshot.
*
* The option is a: <code>int</code> type.
*
* Group: mongodb
*
* @param snapshotFetchSize the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder snapshotFetchSize(int snapshotFetchSize) {
doSetProperty("snapshotFetchSize", snapshotFetchSize);
return this;
}
/**
* This setting must be set to specify a list of tables/collections
* whose snapshot must be taken on creating or restarting the connector.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: mongodb
*
* @param snapshotIncludeCollectionList the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder snapshotIncludeCollectionList(java.lang.String snapshotIncludeCollectionList) {
doSetProperty("snapshotIncludeCollectionList", snapshotIncludeCollectionList);
return this;
}
/**
* The maximum number of threads used to perform the snapshot. Defaults
* to 1.
*
* The option is a: <code>int</code> type.
*
* Default: 1
* Group: mongodb
*
* @param snapshotMaxThreads the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder snapshotMaxThreads(int snapshotMaxThreads) {
doSetProperty("snapshotMaxThreads", snapshotMaxThreads);
return this;
}
/**
* The criteria for running a snapshot upon startup of the connector.
* Select one of the following snapshot options: 'initial' (default): If
* the connector does not detect any offsets for the logical server
* name, it runs a snapshot that captures the current full state of the
* configured tables. After the snapshot completes, the connector begins
* to stream changes from the oplog. 'never': The connector does not run
* a snapshot. Upon first startup, the connector immediately begins
* reading from the beginning of the oplog.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: initial
* Group: mongodb
*
* @param snapshotMode the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder snapshotMode(java.lang.String snapshotMode) {
doSetProperty("snapshotMode", snapshotMode);
return this;
}
/**
* When 'snapshot.mode' is set as configuration_based, this setting
* permits to specify whenever the data should be snapshotted or not.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: mongodb
*
* @param snapshotModeConfigurationBasedSnapshotData the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder snapshotModeConfigurationBasedSnapshotData(boolean snapshotModeConfigurationBasedSnapshotData) {
doSetProperty("snapshotModeConfigurationBasedSnapshotData", snapshotModeConfigurationBasedSnapshotData);
return this;
}
/**
* When 'snapshot.mode' is set as configuration_based, this setting
* permits to specify whenever the data should be snapshotted or not in
* case of error.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: mongodb
*
* @param snapshotModeConfigurationBasedSnapshotOnDataError the value to
* set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder snapshotModeConfigurationBasedSnapshotOnDataError(boolean snapshotModeConfigurationBasedSnapshotOnDataError) {
doSetProperty("snapshotModeConfigurationBasedSnapshotOnDataError", snapshotModeConfigurationBasedSnapshotOnDataError);
return this;
}
/**
* When 'snapshot.mode' is set as configuration_based, this setting
* permits to specify whenever the schema should be snapshotted or not
* in case of error.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: mongodb
*
* @param snapshotModeConfigurationBasedSnapshotOnSchemaError the value
* to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder snapshotModeConfigurationBasedSnapshotOnSchemaError(boolean snapshotModeConfigurationBasedSnapshotOnSchemaError) {
doSetProperty("snapshotModeConfigurationBasedSnapshotOnSchemaError", snapshotModeConfigurationBasedSnapshotOnSchemaError);
return this;
}
/**
* When 'snapshot.mode' is set as configuration_based, this setting
* permits to specify whenever the schema should be snapshotted or not.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: mongodb
*
* @param snapshotModeConfigurationBasedSnapshotSchema the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder snapshotModeConfigurationBasedSnapshotSchema(boolean snapshotModeConfigurationBasedSnapshotSchema) {
doSetProperty("snapshotModeConfigurationBasedSnapshotSchema", snapshotModeConfigurationBasedSnapshotSchema);
return this;
}
/**
* When 'snapshot.mode' is set as configuration_based, this setting
* permits to specify whenever the stream should start or not after
* snapshot.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: mongodb
*
* @param snapshotModeConfigurationBasedStartStream the value to set
* @return the dsl builder
*/
default DebeziumMongodbComponentBuilder snapshotModeConfigurationBasedStartStream(boolean snapshotModeConfigurationBasedStartStream) {
doSetProperty("snapshotModeConfigurationBasedStartStream", snapshotModeConfigurationBasedStartStream);
return this;
}
/**
* When 'snapshot.mode' is set as custom, this setting must be set to
 * specify the name of the custom implementation provided in the
* 'name()' method. The implementations must implement the 'Snapshotter'
*
|
that
|
java
|
apache__camel
|
components/camel-oaipmh/src/main/java/org/apache/camel/oaipmh/component/model/OAIPMHVerb.java
|
{
"start": 860,
"end": 985
}
|
enum ____ {
ListRecords,
Identify,
ListIdentifiers,
ListMetadataFormats,
GetRecord,
ListSets
}
|
OAIPMHVerb
|
java
|
apache__flink
|
flink-streaming-java/src/main/java/org/apache/flink/streaming/util/retryable/AsyncRetryStrategies.java
|
{
"start": 2971,
"end": 4308
}
|
class ____<OUT> implements AsyncRetryStrategy<OUT> {
private static final long serialVersionUID = 1L;
private final int maxAttempts;
private final long backoffTimeMillis;
private final Predicate<Collection<OUT>> resultPredicate;
private final Predicate<Throwable> exceptionPredicate;
private FixedDelayRetryStrategy(
int maxAttempts,
long backoffTimeMillis,
Predicate<Collection<OUT>> resultPredicate,
Predicate<Throwable> exceptionPredicate) {
this.maxAttempts = maxAttempts;
this.backoffTimeMillis = backoffTimeMillis;
this.resultPredicate = resultPredicate;
this.exceptionPredicate = exceptionPredicate;
}
@Override
public boolean canRetry(int currentAttempts) {
return currentAttempts <= maxAttempts;
}
@Override
public AsyncRetryPredicate<OUT> getRetryPredicate() {
return new RetryPredicate(resultPredicate, exceptionPredicate);
}
@Override
public long getBackoffTimeMillis(int currentAttempts) {
return backoffTimeMillis;
}
}
/** FixedDelayRetryStrategyBuilder for building a FixedDelayRetryStrategy. */
public static
|
FixedDelayRetryStrategy
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/http/codec/multipart/FileStorage.java
|
{
"start": 1955,
"end": 2232
}
|
class ____ extends FileStorage {
private final Mono<Path> directory;
public PathFileStorage(Path directory) {
this.directory = Mono.just(directory);
}
@Override
public Mono<Path> directory() {
return this.directory;
}
}
private static final
|
PathFileStorage
|
java
|
apache__spark
|
common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/RegisterExecutor.java
|
{
"start": 1224,
"end": 2851
}
|
class ____ extends BlockTransferMessage {
public final String appId;
public final String execId;
public final ExecutorShuffleInfo executorInfo;
public RegisterExecutor(
String appId,
String execId,
ExecutorShuffleInfo executorInfo) {
this.appId = appId;
this.execId = execId;
this.executorInfo = executorInfo;
}
@Override
protected Type type() { return Type.REGISTER_EXECUTOR; }
@Override
public int hashCode() {
return Objects.hash(appId, execId, executorInfo);
}
@Override
public String toString() {
return "RegisterExecutor[appId=" + appId + ", execId=" + execId +
",executorInfo=" + executorInfo + "]";
}
@Override
public boolean equals(Object other) {
if (other instanceof RegisterExecutor o) {
return Objects.equals(appId, o.appId)
&& Objects.equals(execId, o.execId)
&& Objects.equals(executorInfo, o.executorInfo);
}
return false;
}
@Override
public int encodedLength() {
return Encoders.Strings.encodedLength(appId)
+ Encoders.Strings.encodedLength(execId)
+ executorInfo.encodedLength();
}
@Override
public void encode(ByteBuf buf) {
Encoders.Strings.encode(buf, appId);
Encoders.Strings.encode(buf, execId);
executorInfo.encode(buf);
}
public static RegisterExecutor decode(ByteBuf buf) {
String appId = Encoders.Strings.decode(buf);
String execId = Encoders.Strings.decode(buf);
ExecutorShuffleInfo executorShuffleInfo = ExecutorShuffleInfo.decode(buf);
return new RegisterExecutor(appId, execId, executorShuffleInfo);
}
}
|
RegisterExecutor
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobCleanup.java
|
{
"start": 4658,
"end": 11684
}
|
class ____ extends FileOutputCommitter {
@Override
public void abortJob(JobContext context, int state) throws IOException {
JobConf conf = context.getJobConf();
;
Path outputPath = FileOutputFormat.getOutputPath(conf);
FileSystem fs = outputPath.getFileSystem(conf);
String fileName = (state == JobStatus.FAILED) ? TestJobCleanup.ABORT_FAILED_FILE_NAME
: TestJobCleanup.ABORT_KILLED_FILE_NAME;
fs.create(new Path(outputPath, fileName)).close();
}
}
private Path getNewOutputDir() {
return new Path(TEST_ROOT_DIR, "output-" + outDirs++);
}
private void configureJob(JobConf jc, String jobName, int maps, int reds,
Path outDir) {
jc.setJobName(jobName);
jc.setInputFormat(TextInputFormat.class);
jc.setOutputKeyClass(LongWritable.class);
jc.setOutputValueClass(Text.class);
FileInputFormat.setInputPaths(jc, inDir);
FileOutputFormat.setOutputPath(jc, outDir);
jc.setMapperClass(IdentityMapper.class);
jc.setReducerClass(IdentityReducer.class);
jc.setNumMapTasks(maps);
jc.setNumReduceTasks(reds);
}
// run a job with 1 map and let it run to completion
private void testSuccessfulJob(String filename,
Class<? extends OutputCommitter> committer, String[] exclude)
throws IOException {
JobConf jc = mr.createJobConf();
Path outDir = getNewOutputDir();
configureJob(jc, "job with cleanup()", 1, 0, outDir);
jc.setOutputCommitter(committer);
JobClient jobClient = new JobClient(jc);
RunningJob job = jobClient.submitJob(jc);
JobID id = job.getID();
job.waitForCompletion();
LOG.info("Job finished : " + job.isComplete());
Path testFile = new Path(outDir, filename);
assertTrue(fileSys.exists(testFile),
"Done file \"" + testFile + "\" missing for job " + id);
// check if the files from the missing set exists
for (String ex : exclude) {
Path file = new Path(outDir, ex);
assertFalse(fileSys.exists(file),
"File " + file + " should not be present for successful job "
+ id);
}
}
// run a job for which all the attempts simply fail.
private void testFailedJob(String fileName,
Class<? extends OutputCommitter> committer, String[] exclude)
throws IOException {
JobConf jc = mr.createJobConf();
Path outDir = getNewOutputDir();
configureJob(jc, "fail job with abort()", 1, 0, outDir);
jc.setMaxMapAttempts(1);
// set the job to fail
jc.setMapperClass(UtilsForTests.FailMapper.class);
jc.setOutputCommitter(committer);
JobClient jobClient = new JobClient(jc);
RunningJob job = jobClient.submitJob(jc);
JobID id = job.getID();
job.waitForCompletion();
assertEquals(JobStatus.FAILED, job.getJobState(), "Job did not fail");
if (fileName != null) {
Path testFile = new Path(outDir, fileName);
assertTrue(fileSys.exists(testFile),
"File " + testFile + " missing for failed job " + id);
}
// check if the files from the missing set exists
for (String ex : exclude) {
Path file = new Path(outDir, ex);
assertFalse(fileSys.exists(file),
"File " + file + " should not be present for failed job " + id);
}
}
// run a job which gets stuck in mapper and kill it.
private void testKilledJob(String fileName,
Class<? extends OutputCommitter> committer, String[] exclude)
throws IOException {
JobConf jc = mr.createJobConf();
Path outDir = getNewOutputDir();
configureJob(jc, "kill job with abort()", 1, 0, outDir);
// set the job to wait for long
jc.setMapperClass(UtilsForTests.KillMapper.class);
jc.setOutputCommitter(committer);
JobClient jobClient = new JobClient(jc);
RunningJob job = jobClient.submitJob(jc);
JobID id = job.getID();
Counters counters = job.getCounters();
// wait for the map to be launched
while (true) {
if (counters.getCounter(JobCounter.TOTAL_LAUNCHED_MAPS) == 1) {
break;
}
LOG.info("Waiting for a map task to be launched");
UtilsForTests.waitFor(100);
counters = job.getCounters();
}
job.killJob(); // kill the job
job.waitForCompletion(); // wait for the job to complete
assertEquals(JobStatus.KILLED, job.getJobState(), "Job was not killed");
if (fileName != null) {
Path testFile = new Path(outDir, fileName);
assertTrue(fileSys.exists(testFile),
"File " + testFile + " missing for job " + id);
}
// check if the files from the missing set exists
for (String ex : exclude) {
Path file = new Path(outDir, ex);
assertFalse(fileSys.exists(file),
"File " + file + " should not be present for killed job " + id);
}
}
/**
* Test default cleanup/abort behavior
*
* @throws IOException
*/
@Test
public void testDefaultCleanupAndAbort() throws IOException {
// check with a successful job
testSuccessfulJob(FileOutputCommitter.SUCCEEDED_FILE_NAME,
FileOutputCommitter.class, new String[] {});
// check with a failed job
testFailedJob(null, FileOutputCommitter.class,
new String[] { FileOutputCommitter.SUCCEEDED_FILE_NAME });
// check default abort job kill
testKilledJob(null, FileOutputCommitter.class,
new String[] { FileOutputCommitter.SUCCEEDED_FILE_NAME });
}
/**
* Test if a failed job with custom committer runs the abort code.
*
* @throws IOException
*/
@Test
public void testCustomAbort() throws IOException {
// check with a successful job
testSuccessfulJob(FileOutputCommitter.SUCCEEDED_FILE_NAME,
CommitterWithCustomAbort.class, new String[] { ABORT_FAILED_FILE_NAME,
ABORT_KILLED_FILE_NAME });
// check with a failed job
testFailedJob(ABORT_FAILED_FILE_NAME, CommitterWithCustomAbort.class,
new String[] { FileOutputCommitter.SUCCEEDED_FILE_NAME,
ABORT_KILLED_FILE_NAME });
// check with a killed job
testKilledJob(ABORT_KILLED_FILE_NAME, CommitterWithCustomAbort.class,
new String[] { FileOutputCommitter.SUCCEEDED_FILE_NAME,
ABORT_FAILED_FILE_NAME });
}
/**
* Test if a failed job with custom committer runs the deprecated
* {@link FileOutputCommitter#cleanupJob(JobContext)} code for api
* compatibility testing.
*/
@Test
public void testCustomCleanup() throws IOException {
// check with a successful job
testSuccessfulJob(CUSTOM_CLEANUP_FILE_NAME,
CommitterWithCustomDeprecatedCleanup.class,
new String[] {});
// check with a failed job
testFailedJob(CUSTOM_CLEANUP_FILE_NAME,
CommitterWithCustomDeprecatedCleanup.class,
new String[] {FileOutputCommitter.SUCCEEDED_FILE_NAME});
// check with a killed job
testKilledJob(TestJobCleanup.CUSTOM_CLEANUP_FILE_NAME,
CommitterWithCustomDeprecatedCleanup.class,
new String[] {FileOutputCommitter.SUCCEEDED_FILE_NAME});
}
}
|
CommitterWithCustomAbort
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/bug/Issue101.java
|
{
"start": 244,
"end": 656
}
|
class ____ extends TestCase {
public void test_for_issure() throws Exception {
VO vo = new VO();
vo.a = new Object();
vo.b = vo.a;
vo.c = vo.a;
String text = JSON.toJSONString(vo);
Assert.assertEquals("{\"a\":{},\"b\":{},\"c\":{}}", text);
}
@JSONType(serialzeFeatures=SerializerFeature.DisableCircularReferenceDetect)
public static
|
Issue101
|
java
|
elastic__elasticsearch
|
modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java
|
{
"start": 3931,
"end": 6523
}
|
class ____ extends FieldMapper.Builder {
final Parameter<Boolean> eagerGlobalOrdinals = Parameter.boolParam(
"eager_global_ordinals",
true,
m -> toType(m).eagerGlobalOrdinals,
true
);
final Parameter<List<Relations>> relations = new Parameter<List<Relations>>(
"relations",
true,
Collections::emptyList,
(n, c, o) -> Relations.parse(o),
m -> toType(m).relations,
XContentBuilder::field,
Objects::toString
).setMergeValidator(ParentJoinFieldMapper::checkRelationsConflicts);
final Parameter<Map<String, String>> meta = Parameter.metaParam();
public Builder(String name) {
super(name);
}
public Builder addRelation(String parent, Set<String> children) {
relations.setValue(Collections.singletonList(new Relations(parent, children)));
return this;
}
@Override
protected Parameter<?>[] getParameters() {
return new Parameter<?>[] { eagerGlobalOrdinals, relations, meta };
}
@Override
public ParentJoinFieldMapper build(MapperBuilderContext context) {
if (multiFieldsBuilder.hasMultiFields()) {
DEPRECATION_LOGGER.warn(
DeprecationCategory.MAPPINGS,
CONTENT_TYPE + "_multifields",
"Adding multifields to [" + CONTENT_TYPE + "] mappers has no effect and will be forbidden in future"
);
}
checkObjectOrNested(context, leafName());
final Map<String, ParentIdFieldMapper> parentIdFields = new HashMap<>();
relations.get()
.stream()
.map(relation -> new ParentIdFieldMapper(leafName() + "#" + relation.parent(), eagerGlobalOrdinals.get()))
.forEach(mapper -> parentIdFields.put(mapper.fullPath(), mapper));
Joiner joiner = new Joiner(leafName(), relations.get());
return new ParentJoinFieldMapper(
leafName(),
new JoinFieldType(context.buildFullName(leafName()), joiner, meta.get()),
Collections.unmodifiableMap(parentIdFields),
eagerGlobalOrdinals.get(),
relations.get()
);
}
}
public static final TypeParser PARSER = new TypeParser((n, c) -> {
checkIndexCompatibility(c.getIndexSettings(), n);
return new Builder(n);
});
public static final
|
Builder
|
java
|
quarkusio__quarkus
|
extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/hash/ReactiveHashScanCursor.java
|
{
"start": 158,
"end": 503
}
|
interface ____<K, V> extends ReactiveCursor<Map<K, V>> {
/**
* Produces a {@code Multi} emitting each entry from hash individually.
* Unlike {@link #next()} which provides the entries by batch, this method returns them one by one.
*
* @return the multi
*/
Multi<Map.Entry<K, V>> toMulti();
}
|
ReactiveHashScanCursor
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/PatternMatchingInstanceofTest.java
|
{
"start": 1459,
"end": 1805
}
|
class ____ {
void test(Object o) {
if (o instanceof Test test) {
test(test);
}
}
}
""")
.doTest();
}
@Test
public void seesThroughParens() {
helper
.addInputLines(
"Test.java",
"""
|
Test
|
java
|
quarkusio__quarkus
|
extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/broadcast/LoConnection.java
|
{
"start": 278,
"end": 701
}
|
class ____ {
@Inject
WebSocketConnection connection;
@OnOpen
void open() {
// Send the message only to the current connection
// This does not make much sense but it's good enough to test the filter
connection.broadcast()
.filter(c -> connection.id().equals(c.id()))
.sendTextAndAwait(connection.pathParam("client").toLowerCase());
}
}
|
LoConnection
|
java
|
apache__camel
|
components/camel-sjms2/src/test/java/org/apache/camel/component/sjms2/Sjms2EndpointTest.java
|
{
"start": 1626,
"end": 6473
}
|
class ____ extends CamelTestSupport {
@RegisterExtension
public static ArtemisService service = ArtemisServiceFactory.createVMService();
@Override
protected boolean useJmx() {
return true;
}
@Test
public void testDefaults() {
Endpoint endpoint = context.getEndpoint("sjms2:test");
assertNotNull(endpoint);
assertTrue(endpoint instanceof Sjms2Endpoint);
Sjms2Endpoint sjms = (Sjms2Endpoint) endpoint;
assertEquals("sjms2://test", sjms.getEndpointUri());
assertEquals(ExchangePattern.InOnly, sjms.createExchange().getPattern());
}
@Test
public void testQueueEndpoint() {
Endpoint sjms = context.getEndpoint("sjms2:queue:test");
assertNotNull(sjms);
assertEquals("sjms2://queue:test", sjms.getEndpointUri());
assertTrue(sjms instanceof Sjms2Endpoint);
}
@Test
public void testJndiStyleEndpointName() {
Sjms2Endpoint sjms = context.getEndpoint("sjms2:/jms/test/hov.t1.dev:topic", Sjms2Endpoint.class);
assertNotNull(sjms);
assertFalse(sjms.isTopic());
assertEquals("/jms/test/hov.t1.dev:topic", sjms.getDestinationName());
}
@Test
public void testSetTransacted() {
Endpoint endpoint = context.getEndpoint("sjms2:queue:test?transacted=true");
assertNotNull(endpoint);
assertTrue(endpoint instanceof Sjms2Endpoint);
Sjms2Endpoint qe = (Sjms2Endpoint) endpoint;
assertTrue(qe.isTransacted());
}
@Test
public void testAsyncProducer() {
Endpoint endpoint = context.getEndpoint("sjms2:queue:test?synchronous=true");
assertNotNull(endpoint);
assertTrue(endpoint instanceof Sjms2Endpoint);
Sjms2Endpoint qe = (Sjms2Endpoint) endpoint;
assertTrue(qe.isSynchronous());
}
@Test
public void testReplyTo() {
String replyTo = "reply.to.queue";
Endpoint endpoint = context.getEndpoint("sjms2:queue:test?replyTo=" + replyTo);
assertNotNull(endpoint);
assertTrue(endpoint instanceof Sjms2Endpoint);
Sjms2Endpoint qe = (Sjms2Endpoint) endpoint;
assertEquals(qe.getReplyTo(), replyTo);
assertEquals(ExchangePattern.InOut, qe.createExchange().getPattern());
}
@Test
public void testDefaultExchangePattern() {
try {
Sjms2Endpoint sjms = (Sjms2Endpoint) context.getEndpoint("sjms2:queue:test");
assertNotNull(sjms);
assertEquals(ExchangePattern.InOnly, sjms.getExchangePattern());
} catch (Exception e) {
fail("Exception thrown: " + e.getLocalizedMessage());
}
}
@Test
public void testInOnlyExchangePattern() {
try {
Endpoint sjms = context.getEndpoint("sjms2:queue:test?exchangePattern=" + ExchangePattern.InOnly);
assertNotNull(sjms);
assertEquals(ExchangePattern.InOnly, sjms.createExchange().getPattern());
} catch (Exception e) {
fail("Exception thrown: " + e.getLocalizedMessage());
}
}
@Test
public void testInOutExchangePattern() {
try {
Endpoint sjms = context.getEndpoint("sjms2:queue:test?exchangePattern=" + ExchangePattern.InOut);
assertNotNull(sjms);
assertEquals(ExchangePattern.InOut, sjms.createExchange().getPattern());
} catch (Exception e) {
fail("Exception thrown: " + e.getLocalizedMessage());
}
}
@Test
public void testReplyToAndMEPMatch() {
String replyTo = "reply.to.queue";
Endpoint endpoint = context
.getEndpoint("sjms2:queue:test?replyTo=" + replyTo + "&exchangePattern=" + ExchangePattern.InOut);
assertNotNull(endpoint);
assertTrue(endpoint instanceof Sjms2Endpoint);
Sjms2Endpoint qe = (Sjms2Endpoint) endpoint;
assertEquals(qe.getReplyTo(), replyTo);
assertEquals(ExchangePattern.InOut, qe.createExchange().getPattern());
}
@Test
public void testDestinationName() {
Endpoint endpoint = context.getEndpoint("sjms2:queue:test?synchronous=true");
assertNotNull(endpoint);
assertTrue(endpoint instanceof Sjms2Endpoint);
Sjms2Endpoint qe = (Sjms2Endpoint) endpoint;
assertTrue(qe.isSynchronous());
}
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext camelContext = super.createCamelContext();
ActiveMQConnectionFactory connectionFactory
= new ActiveMQConnectionFactory(service.serviceAddress());
Sjms2Component component = new Sjms2Component();
component.setConnectionFactory(connectionFactory);
camelContext.addComponent("sjms2", component);
return camelContext;
}
}
|
Sjms2EndpointTest
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationInFilesystem.java
|
{
"start": 1485,
"end": 2382
}
|
class ____ extends
ITestSessionDelegationInFilesystem {
@BeforeEach
@Override
public void setup() throws Exception {
super.setup();
probeForAssumedRoleARN(getConfiguration());
}
@Override
protected String getDelegationBinding() {
return DELEGATION_TOKEN_ROLE_BINDING;
}
@Override
public Text getTokenKind() {
return ROLE_TOKEN_KIND;
}
/**
* This verifies that the granted credentials only access the target bucket
* by using the credentials in a new S3 client to query the public data bucket.
* @param delegatedFS delegated FS with role-restricted access.
* @throws Exception failure
*/
@Override
protected void verifyRestrictedPermissions(final S3AFileSystem delegatedFS)
throws Exception {
intercept(AccessDeniedException.class,
() -> readExternalDatasetMetadata(delegatedFS));
}
}
|
ITestRoleDelegationInFilesystem
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
|
{
"start": 9137,
"end": 16879
}
|
class ____ extends MockRMWithCustomAMLauncher {
public MockRMWithAMS(Configuration conf, ContainerManagementProtocol containerManager) {
super(conf, containerManager);
}
@Override
protected void doSecureLogin() throws IOException {
// Skip the login.
}
@Override
protected ApplicationMasterService createApplicationMasterService() {
return new ApplicationMasterService(getRMContext(), this.scheduler);
}
@SuppressWarnings("unchecked")
public static Token<? extends TokenIdentifier> setupAndReturnAMRMToken(
InetSocketAddress rmBindAddress,
Collection<Token<? extends TokenIdentifier>> allTokens) {
for (Token<? extends TokenIdentifier> token : allTokens) {
if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
SecurityUtil.setTokenService(token, rmBindAddress);
return (Token<AMRMTokenIdentifier>) token;
}
}
return null;
}
}
@ParameterizedTest
@MethodSource("configs")
public void testAuthorizedAccess(Configuration pConf) throws Exception {
initTestAMAuthorization(pConf);
MyContainerManager containerManager = new MyContainerManager();
rm =
new MockRMWithAMS(conf, containerManager);
rm.start();
MockNM nm1 = rm.registerNode("localhost:1234", 5120);
Map<ApplicationAccessType, String> acls =
new HashMap<ApplicationAccessType, String>(2);
acls.put(ApplicationAccessType.VIEW_APP, "*");
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(1024, rm)
.withAppName("appname")
.withUser("appuser")
.withAcls(acls)
.build();
RMApp app = MockRMAppSubmitter.submit(rm, data);
nm1.nodeHeartbeat(true);
int waitCount = 0;
while (containerManager.containerTokens == null && waitCount++ < 20) {
LOG.info("Waiting for AM Launch to happen..");
Thread.sleep(1000);
}
assertNotNull(containerManager.containerTokens);
RMAppAttempt attempt = app.getCurrentAppAttempt();
ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId();
waitForLaunchedState(attempt);
// Create a client to the RM.
final Configuration conf = rm.getConfig();
final YarnRPC rpc = YarnRPC.create(conf);
UserGroupInformation currentUser = UserGroupInformation
.createRemoteUser(applicationAttemptId.toString());
Credentials credentials = containerManager.getContainerCredentials();
final InetSocketAddress rmBindAddress =
rm.getApplicationMasterService().getBindAddress();
Token<? extends TokenIdentifier> amRMToken =
MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,
credentials.getAllTokens());
currentUser.addToken(amRMToken);
ApplicationMasterProtocol client = currentUser
.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
@Override
public ApplicationMasterProtocol run() {
return (ApplicationMasterProtocol) rpc.getProxy(ApplicationMasterProtocol.class, rm
.getApplicationMasterService().getBindAddress(), conf);
}
});
RegisterApplicationMasterRequest request = Records
.newRecord(RegisterApplicationMasterRequest.class);
RegisterApplicationMasterResponse response =
client.registerApplicationMaster(request);
assertNotNull(response.getClientToAMTokenMasterKey());
if (UserGroupInformation.isSecurityEnabled()) {
assertTrue(response.getClientToAMTokenMasterKey().array().length > 0);
}
assertEquals("*", response.getApplicationACLs().get(ApplicationAccessType.VIEW_APP),
"Register response has bad ACLs");
}
@ParameterizedTest
@MethodSource("configs")
public void testUnauthorizedAccess(Configuration pConf) throws Exception {
initTestAMAuthorization(pConf);
MyContainerManager containerManager = new MyContainerManager();
rm = new MockRMWithAMS(conf, containerManager);
rm.start();
MockNM nm1 = rm.registerNode("localhost:1234", 5120);
RMApp app = MockRMAppSubmitter.submitWithMemory(1024, rm);
nm1.nodeHeartbeat(true);
int waitCount = 0;
while (containerManager.containerTokens == null && waitCount++ < 40) {
LOG.info("Waiting for AM Launch to happen..");
Thread.sleep(1000);
}
assertNotNull(containerManager.containerTokens);
RMAppAttempt attempt = app.getCurrentAppAttempt();
ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId();
waitForLaunchedState(attempt);
final Configuration conf = rm.getConfig();
final YarnRPC rpc = YarnRPC.create(conf);
final InetSocketAddress serviceAddr = conf.getSocketAddr(
YarnConfiguration.RM_SCHEDULER_ADDRESS,
YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
UserGroupInformation currentUser = UserGroupInformation
.createRemoteUser(applicationAttemptId.toString());
// First try contacting NM without tokens
ApplicationMasterProtocol client = currentUser
.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
@Override
public ApplicationMasterProtocol run() {
return (ApplicationMasterProtocol) rpc.getProxy(ApplicationMasterProtocol.class,
serviceAddr, conf);
}
});
RegisterApplicationMasterRequest request = Records
.newRecord(RegisterApplicationMasterRequest.class);
try {
client.registerApplicationMaster(request);
fail("Should fail with authorization error");
} catch (Exception e) {
if (isCause(AccessControlException.class, e)) {
// Because there are no tokens, the request should be rejected as the
// server side will assume we are trying simple auth.
String expectedMessage = "";
if (UserGroupInformation.isSecurityEnabled()) {
expectedMessage = "Client cannot authenticate via:[TOKEN]";
} else {
expectedMessage =
"SIMPLE authentication is not enabled. Available:[TOKEN]";
}
assertTrue(e.getCause().getMessage().contains(expectedMessage));
} else {
throw e;
}
}
// TODO: Add validation of invalid authorization when there's more data in
// the AMRMToken
}
/**
* Identify if an expected throwable included in an exception stack. We use
* this because sometimes, an exception will be wrapped to another exception
* before thrown. Like,
*
* <pre>
* {@code
* void methodA() throws IOException {
* try {
* // something
* } catch (AccessControlException e) {
* // do process
* throw new IOException(e)
* }
* }
* </pre>
*
* So we cannot simply catch AccessControlException by using
* <pre>
* {@code
* try {
* methodA()
* } catch (AccessControlException e) {
* // do something
* }
* </pre>
*
* This method is useful in such cases.
*/
private static boolean isCause(
Class<? extends Throwable> expected,
Throwable e
) {
return (e != null)
&& (expected.isInstance(e) || isCause(expected, e.getCause()));
}
private void waitForLaunchedState(RMAppAttempt attempt)
throws InterruptedException {
int waitCount = 0;
while (attempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED
&& waitCount++ < 40) {
LOG.info("Waiting for AppAttempt to reach LAUNCHED state. "
+ "Current state is " + attempt.getAppAttemptState());
Thread.sleep(1000);
}
assertEquals(attempt.getAppAttemptState(),
RMAppAttemptState.LAUNCHED);
}
}
|
MockRMWithAMS
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/http/Saml2LoginBeanDefinitionParserTests.java
|
{
"start": 5164,
"end": 18708
}
|
class ____ {
static {
OpenSamlInitializationService.initialize();
}
private static final String CONFIG_LOCATION_PREFIX = "classpath:org/springframework/security/config/http/Saml2LoginBeanDefinitionParserTests";
private static final RelyingPartyRegistration registration = TestRelyingPartyRegistrations.noCredentials()
.signingX509Credentials((c) -> c.add(TestSaml2X509Credentials.assertingPartySigningCredential()))
.assertingPartyMetadata((party) -> party
.verificationX509Credentials((c) -> c.add(TestSaml2X509Credentials.relyingPartyVerifyingCredential())))
.build();
private static String SIGNED_RESPONSE;
private static final String IDP_SSO_URL = "https://sso-url.example.com/IDP/SSO";
public final SpringTestContext spring = new SpringTestContext(this);
@Autowired(required = false)
private RequestCache requestCache;
@Autowired(required = false)
private AuthenticationFailureHandler authenticationFailureHandler;
@Autowired(required = false)
private AuthenticationSuccessHandler authenticationSuccessHandler;
@Autowired(required = false)
private RelyingPartyRegistrationRepository repository;
@Autowired(required = false)
private ApplicationListener<AuthenticationSuccessEvent> authenticationSuccessListener;
@Autowired(required = false)
private AuthenticationConverter authenticationConverter;
@Autowired(required = false)
private Saml2AuthenticationRequestResolver authenticationRequestResolver;
@Autowired(required = false)
private Saml2AuthenticationRequestRepository<AbstractSaml2AuthenticationRequest> authenticationRequestRepository;
@Autowired(required = false)
private ApplicationContext applicationContext;
@Autowired
private MockMvc mvc;
@BeforeAll
static void createResponse() throws Exception {
String destination = registration.getAssertionConsumerServiceLocation();
String assertingPartyEntityId = registration.getAssertingPartyMetadata().getEntityId();
String relyingPartyEntityId = registration.getEntityId();
Response response = TestOpenSamlObjects.response(destination, assertingPartyEntityId);
Assertion assertion = TestOpenSamlObjects.assertion("test@saml.user", assertingPartyEntityId,
relyingPartyEntityId, destination);
response.getAssertions().add(assertion);
Response signed = TestOpenSamlObjects.signed(response,
registration.getSigningX509Credentials().iterator().next(), relyingPartyEntityId);
Marshaller marshaller = XMLObjectProviderRegistrySupport.getMarshallerFactory().getMarshaller(signed);
Element element = marshaller.marshall(signed);
String serialized = SerializeSupport.nodeToString(element);
SIGNED_RESPONSE = Saml2Utils.samlEncode(serialized.getBytes(StandardCharsets.UTF_8));
}
@Test
public void requestWhenSingleRelyingPartyRegistrationThenAutoRedirect() throws Exception {
this.spring.configLocations(this.xml("SingleRelyingPartyRegistration")).autowire();
// @formatter:off
this.mvc.perform(get("/"))
.andExpect(status().is3xxRedirection())
.andExpect(redirectedUrl("/saml2/authenticate/one"));
// @formatter:on
verify(this.requestCache).saveRequest(any(), any());
}
@Test
public void requestWhenMultiRelyingPartyRegistrationThenRedirectToLoginWithRelyingParties() throws Exception {
this.spring.configLocations(this.xml("MultiRelyingPartyRegistration")).autowire();
// @formatter:off
this.mvc.perform(get("/"))
.andExpect(status().is3xxRedirection())
.andExpect(redirectedUrl("/login"));
// @formatter:on
}
@Test
public void requestLoginWhenMultiRelyingPartyRegistrationThenReturnLoginPageWithRelyingParties() throws Exception {
this.spring.configLocations(this.xml("MultiRelyingPartyRegistration")).autowire();
// @formatter:off
MvcResult mvcResult = this.mvc.perform(get("/login"))
.andExpect(status().is2xxSuccessful())
.andReturn();
// @formatter:on
String pageContent = mvcResult.getResponse().getContentAsString();
assertThat(pageContent).contains("<a href=\"/saml2/authenticate/two\">two</a>");
assertThat(pageContent).contains("<a href=\"/saml2/authenticate/one\">one</a>");
}
@Test
public void authenticateWhenAuthenticationResponseNotValidThenThrowAuthenticationException() throws Exception {
this.spring.configLocations(this.xml("SingleRelyingPartyRegistration-WithCustomAuthenticationFailureHandler"))
.autowire();
this.mvc.perform(get("/login/saml2/sso/one").param(Saml2ParameterNames.SAML_RESPONSE, "samlResponse123"));
ArgumentCaptor<AuthenticationException> exceptionCaptor = ArgumentCaptor
.forClass(AuthenticationException.class);
verify(this.authenticationFailureHandler).onAuthenticationFailure(any(), any(), exceptionCaptor.capture());
AuthenticationException exception = exceptionCaptor.getValue();
assertThat(exception).isInstanceOf(Saml2AuthenticationException.class);
assertThat(((Saml2AuthenticationException) exception).getSaml2Error().getErrorCode())
.isEqualTo("invalid_response");
}
@Test
public void authenticateWhenAuthenticationResponseValidThenAuthenticate() throws Exception {
this.spring.configLocations(this.xml("WithCustomRelyingPartyRepository")).autowire();
RelyingPartyRegistration relyingPartyRegistration = relyingPartyRegistrationWithVerifyingCredential();
// @formatter:off
this.mvc.perform(post("/login/saml2/sso/" + relyingPartyRegistration.getRegistrationId()).param(Saml2ParameterNames.SAML_RESPONSE, SIGNED_RESPONSE))
.andDo(MockMvcResultHandlers.print())
.andExpect(status().is2xxSuccessful());
// @formatter:on
ArgumentCaptor<Authentication> authenticationCaptor = ArgumentCaptor.forClass(Authentication.class);
verify(this.authenticationSuccessHandler).onAuthenticationSuccess(any(), any(), authenticationCaptor.capture());
Authentication authentication = authenticationCaptor.getValue();
assertThat(authentication.getPrincipal()).isInstanceOf(Saml2AuthenticatedPrincipal.class);
}
@Test
public void authenticateWhenCustomSecurityContextHolderStrategyThenUses() throws Exception {
this.spring.configLocations(this.xml("WithCustomSecurityContextHolderStrategy")).autowire();
RelyingPartyRegistration relyingPartyRegistration = relyingPartyRegistrationWithVerifyingCredential();
// @formatter:off
this.mvc.perform(post("/login/saml2/sso/" + relyingPartyRegistration.getRegistrationId()).param(Saml2ParameterNames.SAML_RESPONSE, SIGNED_RESPONSE))
.andDo(MockMvcResultHandlers.print())
.andExpect(status().is2xxSuccessful());
// @formatter:on
ArgumentCaptor<Authentication> authenticationCaptor = ArgumentCaptor.forClass(Authentication.class);
verify(this.authenticationSuccessHandler).onAuthenticationSuccess(any(), any(), authenticationCaptor.capture());
Authentication authentication = authenticationCaptor.getValue();
assertThat(authentication.getPrincipal()).isInstanceOf(Saml2AuthenticatedPrincipal.class);
SecurityContextHolderStrategy strategy = this.spring.getContext().getBean(SecurityContextHolderStrategy.class);
verify(strategy, atLeastOnce()).getContext();
}
@Test
public void authenticateWhenAuthenticationResponseValidThenAuthenticationSuccessEventPublished() throws Exception {
this.spring.configLocations(this.xml("WithCustomRelyingPartyRepository")).autowire();
RelyingPartyRegistration relyingPartyRegistration = relyingPartyRegistrationWithVerifyingCredential();
// @formatter:off
this.mvc.perform(post("/login/saml2/sso/" + relyingPartyRegistration.getRegistrationId()).param(Saml2ParameterNames.SAML_RESPONSE, SIGNED_RESPONSE))
.andDo(MockMvcResultHandlers.print())
.andExpect(status().is2xxSuccessful());
// @formatter:on
verify(this.authenticationSuccessListener).onApplicationEvent(any(AuthenticationSuccessEvent.class));
}
@Test
public void authenticateWhenCustomAuthenticationConverterThenUses() throws Exception {
this.spring.configLocations(this.xml("WithCustomRelyingPartyRepository-WithCustomAuthenticationConverter"))
.autowire();
RelyingPartyRegistration relyingPartyRegistration = relyingPartyRegistrationWithVerifyingCredential();
String response = new String(Saml2Utils.samlDecode(SIGNED_RESPONSE));
given(this.authenticationConverter.convert(any(HttpServletRequest.class)))
.willReturn(new Saml2AuthenticationToken(relyingPartyRegistration, response));
// @formatter:off
MockHttpServletRequestBuilder request = post("/login/saml2/sso/" + relyingPartyRegistration.getRegistrationId())
.param("SAMLResponse", SIGNED_RESPONSE);
// @formatter:on
this.mvc.perform(request).andExpect(status().is3xxRedirection()).andExpect(redirectedUrl("/"));
verify(this.authenticationConverter).convert(any(HttpServletRequest.class));
}
@Test
public void authenticateWhenCustomAuthenticationManagerThenUses() throws Exception {
this.spring.configLocations(this.xml("WithCustomRelyingPartyRepository-WithCustomAuthenticationManager"))
.autowire();
RelyingPartyRegistration relyingPartyRegistration = relyingPartyRegistrationWithVerifyingCredential();
AuthenticationManager authenticationManager = this.applicationContext.getBean("customAuthenticationManager",
AuthenticationManager.class);
String response = new String(Saml2Utils.samlDecode(SIGNED_RESPONSE));
given(authenticationManager.authenticate(any()))
.willReturn(new Saml2AuthenticationToken(relyingPartyRegistration, response));
// @formatter:off
MockHttpServletRequestBuilder request = post("/login/saml2/sso/" + relyingPartyRegistration.getRegistrationId())
.param("SAMLResponse", SIGNED_RESPONSE);
// @formatter:on
this.mvc.perform(request).andExpect(status().is3xxRedirection()).andExpect(redirectedUrl("/"));
verify(authenticationManager).authenticate(any());
}
@Test
public void authenticationRequestWhenCustomAuthenticationRequestContextResolverThenUses() throws Exception {
this.spring
.configLocations(this.xml("WithCustomRelyingPartyRepository-WithCustomAuthenticationRequestResolver"))
.autowire();
Saml2RedirectAuthenticationRequest request = Saml2RedirectAuthenticationRequest
.withRelyingPartyRegistration(TestRelyingPartyRegistrations.noCredentials().build())
.samlRequest("request")
.authenticationRequestUri(IDP_SSO_URL)
.build();
given(this.authenticationRequestResolver.resolve(any(HttpServletRequest.class))).willReturn(request);
this.mvc.perform(get("/saml2/authenticate/registration-id")).andExpect(status().isFound());
verify(this.authenticationRequestResolver).resolve(any(HttpServletRequest.class));
}
@Test
public void authenticationRequestWhenCustomAuthnRequestRepositoryThenUses() throws Exception {
this.spring.configLocations(this.xml("WithCustomRelyingPartyRepository-WithCustomAuthnRequestRepository"))
.autowire();
given(this.repository.findByRegistrationId(anyString()))
.willReturn(TestRelyingPartyRegistrations.relyingPartyRegistration().build());
MockHttpServletRequestBuilder request = get("/saml2/authenticate/registration-id");
this.mvc.perform(request).andExpect(status().isFound());
verify(this.authenticationRequestRepository).saveAuthenticationRequest(
any(AbstractSaml2AuthenticationRequest.class), any(HttpServletRequest.class),
any(HttpServletResponse.class));
}
@Test
public void authenticateWhenCustomAuthnRequestRepositoryThenUses() throws Exception {
this.spring.configLocations(this.xml("WithCustomRelyingPartyRepository-WithCustomAuthnRequestRepository"))
.autowire();
RelyingPartyRegistrationRepository repository = mock(RelyingPartyRegistrationRepository.class);
given(this.repository.findByRegistrationId(anyString()))
.willReturn(TestRelyingPartyRegistrations.relyingPartyRegistration().build());
MockHttpServletRequestBuilder request = post("/login/saml2/sso/registration-id").param("SAMLResponse",
SIGNED_RESPONSE);
this.mvc.perform(request);
verify(this.authenticationRequestRepository).loadAuthenticationRequest(any(HttpServletRequest.class));
verify(this.authenticationRequestRepository).removeAuthenticationRequest(any(HttpServletRequest.class),
any(HttpServletResponse.class));
}
@Test
public void saml2LoginWhenLoginProcessingUrlWithoutRegistrationIdAndDefaultAuthenticationConverterThenValidates() {
assertThatExceptionOfType(BeanDefinitionParsingException.class)
.isThrownBy(() -> this.spring.configLocations(this.xml("WithCustomLoginProcessingUrl")).autowire())
.withMessageContaining("loginProcessingUrl must contain {registrationId} path variable");
}
@Test
public void authenticateWhenCustomLoginProcessingUrlAndCustomAuthenticationConverterThenAuthenticate()
throws Exception {
this.spring.configLocations(this.xml("WithCustomLoginProcessingUrl-WithCustomAuthenticationConverter"))
.autowire();
String response = new String(Saml2Utils.samlDecode(SIGNED_RESPONSE));
given(this.authenticationConverter.convert(any(HttpServletRequest.class)))
.willReturn(new Saml2AuthenticationToken(registration, response));
// @formatter:off
MockHttpServletRequestBuilder request = post("/my/custom/url").param("SAMLResponse", SIGNED_RESPONSE);
// @formatter:on
this.mvc.perform(request).andExpect(redirectedUrl("/"));
verify(this.authenticationConverter).convert(any(HttpServletRequest.class));
}
private RelyingPartyRegistration relyingPartyRegistrationWithVerifyingCredential() {
given(this.repository.findByRegistrationId(anyString())).willReturn(registration);
return registration;
}
private String xml(String configName) {
return CONFIG_LOCATION_PREFIX + "-" + configName + ".xml";
}
}
|
Saml2LoginBeanDefinitionParserTests
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryDefaultInEnumSwitchTest.java
|
{
"start": 31503,
"end": 32143
}
|
enum ____ {
ONE,
TWO,
UNRECOGNIZED
}
void m(Case c) {
int x;
switch (c) {
case ONE -> x = 1;
case TWO -> x = 2;
// Removing this would not compile.
default -> throw new AssertionError();
}
System.out.println(x);
}
}
""")
.doTest();
}
@Test
public void multipleLabels() {
refactoringTestHelper
.addInputLines(
"Test.java",
"""
|
Case
|
java
|
netty__netty
|
resolver-dns/src/main/java/io/netty/resolver/dns/DnsResolveContext.java
|
{
"start": 12119,
"end": 26403
}
|
// Terminal failure: the original hostname could not be resolved and every
// configured search-domain fallback query failed as well.
class ____ extends UnknownHostException {
    private static final long serialVersionUID = -8573510133644997085L;
    SearchDomainUnknownHostException(Throwable cause, String originalHostname,
                                     DnsRecordType[] queryTypes, String[] searchDomains) {
        super("Failed to resolve '" + originalHostname + "' " + Arrays.toString(queryTypes) +
                " and search domain query for configured domains failed as well: " +
                Arrays.toString(searchDomains));
        // Reuse the underlying failure's stack trace instead of capturing our own.
        setStackTrace(cause.getStackTrace());
        // Preserve the cause
        // NOTE(review): this skips one level (cause.getCause()), presumably to drop a
        // wrapper exception — confirm against the callers that build 'cause'.
        initCause(cause.getCause());
    }
    // Suppress a warning since this method doesn't need synchronization
    @Override
    public Throwable fillInStackTrace() {
        // Stack trace was already copied from the cause in the constructor;
        // filling it here would be wasted work.
        return this;
    }
}
    // Starts a fresh resolution of the (search-domain expanded) hostname with a
    // brand new context, resetting the query budget to parent.maxQueriesPerResolve().
    void doSearchDomainQuery(String hostname, Promise<List<T>> nextPromise) {
        DnsResolveContext<T> nextContext = newResolverContext(parent, channel,
            originalPromise, hostname, dnsClass,
            expectedTypes, additionals, nameServerAddrs,
            parent.maxQueriesPerResolve());
        nextContext.internalResolve(hostname, nextPromise);
    }
private static String hostnameWithDot(String name) {
if (StringUtil.endsWith(name, '.')) {
return name;
}
return name + '.';
}
    // Resolve the final name from the CNAME cache until there is nothing to follow anymore. This also
    // guards against loops in the cache but early return once a loop is detected.
    //
    // Visible for testing only
    static String cnameResolveFromCache(DnsCnameCache cnameCache, String name) throws UnknownHostException {
        // Fast paths: zero or one cached hop needs no loop detection.
        String first = cnameCache.get(hostnameWithDot(name));
        if (first == null) {
            // Nothing in the cache at all
            return name;
        }
        String second = cnameCache.get(hostnameWithDot(first));
        if (second == null) {
            // Nothing else to follow, return first match.
            return first;
        }
        // Two or more hops: check for an immediate cycle, then walk the chain
        // with tortoise-and-hare loop detection.
        checkCnameLoop(name, first, second);
        return cnameResolveFromCacheLoop(cnameCache, name, first, second);
    }
    // Follows the cached CNAME chain to its end, failing with UnknownHostException
    // when the chain cycles. 'first' acts as the slow pointer (tortoise), the
    // current mapping as the fast pointer (hare).
    private static String cnameResolveFromCacheLoop(
            DnsCnameCache cnameCache, String hostname, String first, String mapping) throws UnknownHostException {
        // Detect loops by advance only every other iteration.
        // See https://en.wikipedia.org/wiki/Cycle_detection#Floyd's_Tortoise_and_Hare
        boolean advance = false;
        String name = mapping;
        // Resolve from cnameCache() until there is no more cname entry cached.
        while ((mapping = cnameCache.get(hostnameWithDot(name))) != null) {
            // Slow pointer meeting the current mapping means the chain cycles.
            checkCnameLoop(hostname, first, mapping);
            name = mapping;
            if (advance) {
                first = cnameCache.get(first);
            }
            advance = !advance;
        }
        // 'name' now holds the last alias in the chain, i.e. the canonical name.
        return name;
    }
private static void checkCnameLoop(String hostname, String first, String second) throws UnknownHostException {
if (first.equals(second)) {
// Follow CNAME from cache would loop. Lets throw and so fail the resolution.
throw new UnknownHostException("CNAME loop detected for '" + hostname + '\'');
}
}
    // Entry point of one resolution attempt: collapse cached CNAMEs, then fire
    // one query per expected record type and flush the channel once at the end.
    private void internalResolve(String name, Promise<List<T>> promise) {
        try {
            // Resolve from cnameCache() until there is no more cname entry cached.
            name = cnameResolveFromCache(cnameCache(), name);
        } catch (Throwable cause) {
            // e.g. a CNAME loop detected in the cache; fail fast without querying.
            promise.tryFailure(cause);
            return;
        }
        try {
            DnsServerAddressStream nameServerAddressStream = getNameServers(name);
            final int end = expectedTypes.length - 1;
            // All but the last type query a duplicate of the stream so each record
            // type starts from the same server position; none is flushed individually.
            for (int i = 0; i < end; ++i) {
                if (!query(name, expectedTypes[i], nameServerAddressStream.duplicate(), false, promise)) {
                    return;
                }
            }
            query(name, expectedTypes[end], nameServerAddressStream, false, promise);
        } finally {
            // Now flush everything we submitted before for the Channel.
            channel.flush();
        }
    }
    /**
     * Returns the {@link DnsServerAddressStream} that was cached for the given hostname or {@code null} if non
     * could be found.
     */
    private DnsServerAddressStream getNameServersFromCache(String hostname) {
        int len = hostname.length();
        if (len == 0) {
            // We never cache for root servers.
            return null;
        }
        // We always store in the cache with a trailing '.'.
        if (hostname.charAt(len - 1) != '.') {
            hostname += ".";
        }
        int idx = hostname.indexOf('.');
        if (idx == hostname.length() - 1) {
            // We are not interested in handling '.' as we should never serve the root servers from cache.
            return null;
        }
        // We start from the closed match and then move down.
        // Strip one leading label per iteration, probing the cache for each
        // successively shorter parent domain.
        for (;;) {
            // Skip '.' as well.
            hostname = hostname.substring(idx + 1);
            int idx2 = hostname.indexOf('.');
            if (idx2 <= 0 || idx2 == hostname.length() - 1) {
                // We are not interested in handling '.TLD.' as we should never serve the root servers from cache.
                return null;
            }
            idx = idx2;
            DnsServerAddressStream entries = authoritativeDnsServerCache().get(hostname);
            if (entries != null) {
                // The returned List may contain unresolved InetSocketAddress instances that will be
                // resolved on the fly in query(....).
                return entries;
            }
        }
    }
    // Sends the question to the next name server in the stream. Handles early
    // completion, an exhausted query budget, cancellation, and name servers whose
    // own address still needs resolving. Responses/failures arrive asynchronously
    // via the listener below.
    private void query(final DnsServerAddressStream nameServerAddrStream,
                       final int nameServerAddrStreamIndex,
                       final DnsQuestion question,
                       final DnsQueryLifecycleObserver queryLifecycleObserver,
                       final boolean flush,
                       final Promise<List<T>> promise,
                       final Throwable cause) {
        // Stop querying when we are done/cancelled or the per-resolve budget is spent.
        if (completeEarly || nameServerAddrStreamIndex >= nameServerAddrStream.size() ||
            allowedQueries == 0 || originalPromise.isCancelled() || promise.isCancelled()) {
            tryToFinishResolve(nameServerAddrStream, nameServerAddrStreamIndex, question, queryLifecycleObserver,
                    promise, cause);
            return;
        }
        --allowedQueries;
        final InetSocketAddress nameServerAddr = nameServerAddrStream.next();
        if (nameServerAddr.isUnresolved()) {
            // The name server itself must be resolved before we can query it.
            queryUnresolvedNameServer(nameServerAddr, nameServerAddrStream, nameServerAddrStreamIndex, question,
                    queryLifecycleObserver, promise, cause);
            return;
        }
        final Promise<AddressedEnvelope<? extends DnsResponse, InetSocketAddress>> queryPromise =
                channel.eventLoop().newPromise();
        // When the stream collects feedback, remember when the query started so the
        // round-trip time can be reported back.
        final long queryStartTimeNanos;
        final boolean isFeedbackAddressStream;
        if (nameServerAddrStream instanceof DnsServerResponseFeedbackAddressStream) {
            queryStartTimeNanos = System.nanoTime();
            isFeedbackAddressStream = true;
        } else {
            queryStartTimeNanos = -1;
            isFeedbackAddressStream = false;
        }
        final Future<AddressedEnvelope<DnsResponse, InetSocketAddress>> f =
                parent.doQuery(channel, nameServerAddr, question,
                        queryLifecycleObserver, additionals, flush, queryPromise);
        queriesInProgress.add(f);
        f.addListener(new FutureListener<AddressedEnvelope<DnsResponse, InetSocketAddress>>() {
            @Override
            public void operationComplete(Future<AddressedEnvelope<DnsResponse, InetSocketAddress>> future) {
                queriesInProgress.remove(future);
                if (promise.isDone() || future.isCancelled()) {
                    queryLifecycleObserver.queryCancelled(allowedQueries);
                    // Check if we need to release the envelope itself. If the query was cancelled the getNow() will
                    // return null as well as the Future will be failed with a CancellationException.
                    AddressedEnvelope<DnsResponse, InetSocketAddress> result = future.getNow();
                    if (result != null) {
                        result.release();
                    }
                    return;
                }
                final Throwable queryCause = future.cause();
                try {
                    if (queryCause == null) {
                        // Success: report timing feedback (if collected) and process the response.
                        if (isFeedbackAddressStream) {
                            final DnsServerResponseFeedbackAddressStream feedbackNameServerAddrStream =
                                    (DnsServerResponseFeedbackAddressStream) nameServerAddrStream;
                            feedbackNameServerAddrStream.feedbackSuccess(nameServerAddr,
                                    System.nanoTime() - queryStartTimeNanos);
                        }
                        onResponse(nameServerAddrStream, nameServerAddrStreamIndex, question, future.getNow(),
                                queryLifecycleObserver, promise);
                    } else {
                        // Server did not respond or I/O error occurred; try again.
                        if (isFeedbackAddressStream) {
                            final DnsServerResponseFeedbackAddressStream feedbackNameServerAddrStream =
                                    (DnsServerResponseFeedbackAddressStream) nameServerAddrStream;
                            feedbackNameServerAddrStream.feedbackFailure(nameServerAddr, queryCause,
                                    System.nanoTime() - queryStartTimeNanos);
                        }
                        queryLifecycleObserver.queryFailed(queryCause);
                        // Retry with the next server; a fresh lifecycle observer is needed
                        // because the current one was just terminated via queryFailed().
                        query(nameServerAddrStream, nameServerAddrStreamIndex + 1, question,
                                newDnsQueryLifecycleObserver(question), true, promise, queryCause);
                    }
                } finally {
                    tryToFinishResolve(nameServerAddrStream, nameServerAddrStreamIndex, question,
                            // queryLifecycleObserver has already been terminated at this point so we must
                            // not allow it to be terminated again by tryToFinishResolve.
                            NoopDnsQueryLifecycleObserver.INSTANCE,
                            promise, queryCause);
                }
            }
        });
    }
    // The chosen name server address is itself unresolved: resolve it first (from
    // the cache, or via a nested DnsAddressResolveContext) and then retry the query.
    private void queryUnresolvedNameServer(final InetSocketAddress nameServerAddr,
                                           final DnsServerAddressStream nameServerAddrStream,
                                           final int nameServerAddrStreamIndex,
                                           final DnsQuestion question,
                                           final DnsQueryLifecycleObserver queryLifecycleObserver,
                                           final Promise<List<T>> promise,
                                           final Throwable cause) {
        final String nameServerName = nameServerAddr.getHostString();
        assert nameServerName != null;
        // Placeholder so we will not try to finish the original query yet.
        final Future<AddressedEnvelope<DnsResponse, InetSocketAddress>> resolveFuture = parent.executor()
                .newSucceededFuture(null);
        queriesInProgress.add(resolveFuture);
        Promise<List<InetAddress>> resolverPromise = parent.executor().newPromise();
        resolverPromise.addListener(new FutureListener<List<InetAddress>>() {
            @Override
            public void operationComplete(final Future<List<InetAddress>> future) {
                // Remove placeholder.
                queriesInProgress.remove(resolveFuture);
                if (future.isSuccess()) {
                    List<InetAddress> resolvedAddresses = future.getNow();
                    // Retry at the same stream index against the now-resolved addresses,
                    // falling back to the remaining servers of the original stream.
                    DnsServerAddressStream addressStream = new CombinedDnsServerAddressStream(
                            nameServerAddr, resolvedAddresses, nameServerAddrStream);
                    query(addressStream, nameServerAddrStreamIndex, question,
                            queryLifecycleObserver, true, promise, cause);
                } else {
                    // Ignore the server and try the next one...
                    query(nameServerAddrStream, nameServerAddrStreamIndex + 1,
                            question, queryLifecycleObserver, true, promise, cause);
                }
            }
        });
        DnsCache resolveCache = resolveCache();
        // Try the resolve cache first; only start a nested resolve context on a miss.
        if (!DnsNameResolver.doResolveAllCached(nameServerName, additionals, resolverPromise, resolveCache,
                parent.searchDomains(), parent.ndots(), parent.resolvedInternetProtocolFamiliesUnsafe())) {
            new DnsAddressResolveContext(parent, channel,
                    originalPromise, nameServerName, additionals, parent.newNameServerAddressStream(nameServerName),
                    // Resolving the unresolved nameserver must be limited by allowedQueries
                    // so we eventually fail
                    allowedQueries,
                    resolveCache,
                    redirectAuthoritativeDnsServerCache(authoritativeDnsServerCache()), false)
                    .resolve(resolverPromise);
        }
    }
private static AuthoritativeDnsServerCache redirectAuthoritativeDnsServerCache(
AuthoritativeDnsServerCache authoritativeDnsServerCache) {
// Don't wrap again to prevent the possibility of an StackOverflowError when wrapping another
// RedirectAuthoritativeDnsServerCache.
if (authoritativeDnsServerCache instanceof RedirectAuthoritativeDnsServerCache) {
return authoritativeDnsServerCache;
}
return new RedirectAuthoritativeDnsServerCache(authoritativeDnsServerCache);
}
private static final
|
SearchDomainUnknownHostException
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindFieldStructureAction.java
|
{
"start": 760,
"end": 1109
}
|
class ____ extends ActionType<FindStructureResponse> {
public static final FindFieldStructureAction INSTANCE = new FindFieldStructureAction();
public static final String NAME = "cluster:monitor/text_structure/find_field_structure";
private FindFieldStructureAction() {
super(NAME);
}
public static
|
FindFieldStructureAction
|
java
|
apache__camel
|
components/camel-jetty/src/test/java/org/apache/camel/component/jetty/async/MyAsyncEndpoint.java
|
{
"start": 1047,
"end": 2111
}
|
/**
 * Test endpoint producing asynchronously, with a configurable reply payload,
 * delay and a number of initial attempts that should fail.
 */
class ____ extends DefaultEndpoint {
    private String reply;
    private long delay = 1000;
    private int failFirstAttempts;

    public MyAsyncEndpoint(String endpointUri, Component component) {
        super(endpointUri, component);
    }

    @Override
    public Producer createProducer() {
        return new MyAsyncProducer(this);
    }

    @Override
    public Consumer createConsumer(Processor processor) {
        // This endpoint is producer-only.
        throw new UnsupportedOperationException("Consumer not supported");
    }

    @Override
    public boolean isSingleton() {
        return false;
    }

    public long getDelay() {
        return delay;
    }

    public void setDelay(long delay) {
        this.delay = delay;
    }

    public int getFailFirstAttempts() {
        return failFirstAttempts;
    }

    public void setFailFirstAttempts(int failFirstAttempts) {
        this.failFirstAttempts = failFirstAttempts;
    }

    public String getReply() {
        return reply;
    }

    public void setReply(String reply) {
        this.reply = reply;
    }
}
|
MyAsyncEndpoint
|
java
|
quarkusio__quarkus
|
extensions/micrometer/deployment/src/test/java/io/quarkus/micrometer/test/GreetingResource.java
|
{
"start": 460,
"end": 545
}
|
class ____ {
@RegisterRestClient(configKey = "greeting")
public
|
GreetingResource
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/layout/AbstractStringLayout.java
|
{
"start": 3961,
"end": 10388
}
|
    // Serializer variant that appends into a caller-supplied StringBuilder,
    // avoiding an intermediate String allocation.
    interface ____ {
        StringBuilder toSerializable(final LogEvent event, final StringBuilder builder);
    }
    /**
     * Default length for new StringBuilder instances: {@value} .
     */
    protected static final int DEFAULT_STRING_BUILDER_SIZE = 1024;
    // Upper bound on the retained capacity of the cached per-thread builder;
    // configurable via the "log4j.layoutStringBuilder.maxSize" property (default 2048).
    protected static final int MAX_STRING_BUILDER_SIZE =
            Math.max(DEFAULT_STRING_BUILDER_SIZE, size("log4j.layoutStringBuilder.maxSize", 2 * 1024));
    // Per-thread scratch builder reused across log events.
    private static final ThreadLocal<StringBuilder> threadLocal = new ThreadLocal<>();
    /**
     * Returns a {@code StringBuilder} that this Layout implementation can use to write the formatted log event to.
     *
     * @return a {@code StringBuilder}
     */
    protected static StringBuilder getStringBuilder() {
        if (AbstractLogger.getRecursionDepth() > 1) { // LOG4J2-2368
            // Recursive logging may clobber the cached StringBuilder.
            return new StringBuilder(DEFAULT_STRING_BUILDER_SIZE);
        }
        StringBuilder result = threadLocal.get();
        if (result == null) {
            result = new StringBuilder(DEFAULT_STRING_BUILDER_SIZE);
            threadLocal.set(result);
        }
        // Shrink an oversized builder before reuse, then reset its content.
        trimToMaxSize(result);
        result.setLength(0);
        return result;
    }
    // Reads an integer property via PropertiesUtil, with a default value.
    private static int size(final String property, final int defaultValue) {
        return PropertiesUtil.getProperties().getIntegerProperty(property, defaultValue);
    }
    // Caps the builder's retained capacity at MAX_STRING_BUILDER_SIZE.
    protected static void trimToMaxSize(final StringBuilder stringBuilder) {
        StringBuilders.trimToMaxSize(stringBuilder, MAX_STRING_BUILDER_SIZE);
    }
    // Encoder for the direct (garbage-free) text path; null when direct encoders
    // are disabled, in which case it is created lazily in getStringBuilderEncoder().
    private Encoder<StringBuilder> textEncoder;
    /**
     * The charset for the formatted message.
     */
    private final Charset charset;
    // Optional serializers producing the footer/header from a synthetic event; may be null.
    private final Serializer footerSerializer;
    private final Serializer headerSerializer;
    protected AbstractStringLayout(final Charset charset) {
        this(charset, (byte[]) null, (byte[]) null);
    }
    /**
     * Builds a new layout.
     * @param aCharset the charset used to encode the header bytes, footer bytes and anything else that needs to be
     *      converted from strings to bytes.
     * @param header the header bytes
     * @param footer the footer bytes
     */
    protected AbstractStringLayout(final Charset aCharset, final byte[] header, final byte[] footer) {
        super(null, header, footer);
        this.headerSerializer = null;
        this.footerSerializer = null;
        // Fall back to UTF-8 when no charset was supplied.
        this.charset = aCharset == null ? StandardCharsets.UTF_8 : aCharset;
        textEncoder = Constants.ENABLE_DIRECT_ENCODERS ? new StringBuilderEncoder(charset) : null;
    }
    /**
     * Builds a new layout.
     * @param config the configuration. May be null.
     * @param aCharset the charset used to encode the header bytes, footer bytes and anything else that needs to be
     *      converted from strings to bytes.
     * @param headerSerializer the header bytes serializer
     * @param footerSerializer the footer bytes serializer
     */
    protected AbstractStringLayout(
            final Configuration config,
            final Charset aCharset,
            final Serializer headerSerializer,
            final Serializer footerSerializer) {
        super(config, null, null);
        this.headerSerializer = headerSerializer;
        this.footerSerializer = footerSerializer;
        this.charset = aCharset == null ? StandardCharsets.UTF_8 : aCharset;
        textEncoder = Constants.ENABLE_DIRECT_ENCODERS ? new StringBuilderEncoder(charset) : null;
    }
    // Encodes the given string with this layout's charset.
    protected byte[] getBytes(final String s) {
        return s.getBytes(charset);
    }
    @Override
    public Charset getCharset() {
        return charset;
    }
    /**
     * @return The default content type for Strings.
     */
    @Override
    public String getContentType() {
        return "text/plain";
    }
    /**
     * Returns the footer, if one is available.
     *
     * @return A byte array containing the footer.
     */
    @Override
    public byte[] getFooter() {
        // The serializer takes precedence; falls back to the statically configured footer.
        return serializeToBytes(footerSerializer, super.getFooter());
    }
    public Serializer getFooterSerializer() {
        return footerSerializer;
    }
    /**
     * Returns the header, if one is available.
     *
     * @return A byte array containing the header.
     */
    @Override
    public byte[] getHeader() {
        return serializeToBytes(headerSerializer, super.getHeader());
    }
    public Serializer getHeaderSerializer() {
        return headerSerializer;
    }
    private DefaultLogEventFactory getLogEventFactory() {
        return DefaultLogEventFactory.getInstance();
    }
    /**
     * Returns a {@code Encoder<StringBuilder>} that this Layout implementation can use for encoding log events.
     *
     * @return a {@code Encoder<StringBuilder>}
     */
    protected Encoder<StringBuilder> getStringBuilderEncoder() {
        // Created lazily when direct encoders were disabled at construction time.
        if (textEncoder == null) {
            textEncoder = new StringBuilderEncoder(getCharset());
        }
        return textEncoder;
    }
protected byte[] serializeToBytes(final Serializer serializer, final byte[] defaultValue) {
final String serializable = serializeToString(serializer);
if (serializable == null) {
return defaultValue;
}
return StringEncoder.toBytes(serializable, getCharset());
}
    // Serializes using a synthetic log event that carries the root logger's name
    // and level (or defaults when no configuration is present). Returns null when
    // no serializer was supplied.
    protected String serializeToString(final Serializer serializer) {
        if (serializer == null) {
            return null;
        }
        final String loggerName;
        final Level level;
        if (configuration != null) {
            final LoggerConfig rootLogger = configuration.getRootLogger();
            loggerName = rootLogger.getName();
            level = rootLogger.getLevel();
        } else {
            loggerName = LogManager.ROOT_LOGGER_NAME;
            level = AbstractConfiguration.getDefaultLevel();
        }
        // Using "" for the FQCN, does it matter?
        final LogEvent logEvent =
                getLogEventFactory().createEvent(loggerName, null, Strings.EMPTY, level, null, null, null);
        return serializer.toSerializable(logEvent);
    }
    /**
     * Formats the Log Event as a byte array.
     *
     * @param event The Log Event.
     * @return The formatted event as a byte array.
     */
    @Override
    public byte[] toByteArray(final LogEvent event) {
        // Convenience path: format to String, then encode with the layout charset.
        return getBytes(toSerializable(event));
    }
}
|
Serializer2
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
|
{
"start": 2431,
"end": 2655
}
|
class ____ {
private ZooKeeper mockZK;
private int count;
private ActiveStandbyElectorCallback mockApp;
private final byte[] data = new byte[8];
private ActiveStandbyElectorTester elector;
|
TestActiveStandbyElector
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/function/array/HSQLArrayPositionsFunction.java
|
{
"start": 572,
"end": 1544
}
|
// HSQLDB rendering of array_positions(): emitted as a correlated
// UNNEST ... WITH ORDINALITY subquery aggregated with array_agg.
class ____ extends AbstractArrayPositionsFunction {
    public HSQLArrayPositionsFunction(boolean list, TypeConfiguration typeConfiguration) {
        super( list, typeConfiguration );
    }
    @Override
    public void render(
            SqlAppender sqlAppender,
            List<? extends SqlAstNode> sqlAstArguments,
            ReturnableType<?> returnType,
            SqlAstTranslator<?> walker) {
        // args: [0] = haystack array, [1] = needle element
        final Expression arrayExpression = (Expression) sqlAstArguments.get( 0 );
        final Expression elementExpression = (Expression) sqlAstArguments.get( 1 );
        // NULL array yields NULL (via the CASE); otherwise collect the 1-based
        // ordinal of every element that IS NOT DISTINCT FROM the needle.
        sqlAppender.append( "case when " );
        arrayExpression.accept( walker );
        sqlAppender.append( " is not null then coalesce((select array_agg(t.idx) from unnest(");
        arrayExpression.accept( walker );
        sqlAppender.append(") with ordinality t(val,idx) where t.val is not distinct from " );
        // NO_PLAIN_PARAMETER avoids rendering a bare '?' here — presumably so the
        // comparison is typed for HSQLDB; confirm against the dialect docs.
        walker.render( elementExpression, SqlAstNodeRenderingMode.NO_PLAIN_PARAMETER );
        // coalesce(...) makes a non-null array with no matches yield an empty integer array.
        sqlAppender.append( "),cast(array[] as integer array)) end" );
    }
}
|
HSQLArrayPositionsFunction
|
java
|
spring-projects__spring-boot
|
build-plugin/spring-boot-maven-plugin/src/main/java/org/springframework/boot/maven/ArtifactsLibraries.java
|
{
"start": 1581,
"end": 6082
}
|
class ____ implements Libraries {
	// Maps Maven dependency scopes onto archive LibraryScopes; scopes absent from
	// this map (e.g. test) are skipped during packaging.
	private static final Map<String, LibraryScope> SCOPES;
	static {
		Map<String, LibraryScope> libraryScopes = new HashMap<>();
		libraryScopes.put(Artifact.SCOPE_COMPILE, LibraryScope.COMPILE);
		libraryScopes.put(Artifact.SCOPE_RUNTIME, LibraryScope.RUNTIME);
		libraryScopes.put(Artifact.SCOPE_PROVIDED, LibraryScope.PROVIDED);
		libraryScopes.put(Artifact.SCOPE_SYSTEM, LibraryScope.PROVIDED);
		SCOPES = Collections.unmodifiableMap(libraryScopes);
	}
	private final Set<Artifact> artifacts;
	private final Set<Artifact> includedArtifacts;
	private final Collection<MavenProject> localProjects;
	private final @Nullable Collection<Dependency> unpacks;
	private final Log log;
	/**
	 * Creates a new {@code ArtifactsLibraries} from the given {@code artifacts}.
	 * @param artifacts the artifacts to represent as libraries
	 * @param localProjects projects for which {@link Library#isLocal() local} libraries
	 * should be created
	 * @param unpacks artifacts that should be unpacked on launch
	 * @param log the log
	 * @since 2.4.0
	 */
	public ArtifactsLibraries(Set<Artifact> artifacts, Collection<MavenProject> localProjects,
			@Nullable Collection<Dependency> unpacks, Log log) {
		// All artifacts are included when no separate included set is given.
		this(artifacts, artifacts, localProjects, unpacks, log);
	}
	/**
	 * Creates a new {@code ArtifactsLibraries} from the given {@code artifacts}.
	 * @param artifacts all artifacts that can be represented as libraries
	 * @param includedArtifacts the actual artifacts to include in the uber jar
	 * @param localProjects projects for which {@link Library#isLocal() local} libraries
	 * should be created
	 * @param unpacks artifacts that should be unpacked on launch
	 * @param log the log
	 * @since 2.4.8
	 */
	public ArtifactsLibraries(Set<Artifact> artifacts, Set<Artifact> includedArtifacts,
			Collection<MavenProject> localProjects, @Nullable Collection<Dependency> unpacks, Log log) {
		this.artifacts = artifacts;
		this.includedArtifacts = includedArtifacts;
		this.localProjects = localProjects;
		this.unpacks = unpacks;
		this.log = log;
	}
	@Override
	public void doWithLibraries(LibraryCallback callback) throws IOException {
		// File names that collide between artifacts get the groupId prepended so
		// every library name inside the archive is unique.
		Set<String> duplicates = getDuplicates(this.artifacts);
		for (Artifact artifact : this.artifacts) {
			String name = getFileName(artifact);
			File file = artifact.getFile();
			LibraryScope scope = SCOPES.get(artifact.getScope());
			if (scope == null || file == null) {
				// Unmapped scope or unresolved artifact: not packaged.
				continue;
			}
			if (duplicates.contains(name)) {
				this.log.debug("Duplicate found: " + name);
				name = artifact.getGroupId() + "-" + name;
				this.log.debug("Renamed to: " + name);
			}
			LibraryCoordinates coordinates = new ArtifactLibraryCoordinates(artifact);
			boolean unpackRequired = isUnpackRequired(artifact);
			boolean local = isLocal(artifact);
			boolean included = this.includedArtifacts.contains(artifact);
			callback.library(new Library(name, file, scope, coordinates, unpackRequired, local, included));
		}
	}
private Set<String> getDuplicates(Set<Artifact> artifacts) {
Set<String> duplicates = new HashSet<>();
Set<String> seen = new HashSet<>();
for (Artifact artifact : artifacts) {
String fileName = getFileName(artifact);
if (artifact.getFile() != null && !seen.add(fileName)) {
duplicates.add(fileName);
}
}
return duplicates;
}
private boolean isUnpackRequired(Artifact artifact) {
if (this.unpacks != null) {
for (Dependency unpack : this.unpacks) {
if (artifact.getGroupId().equals(unpack.getGroupId())
&& artifact.getArtifactId().equals(unpack.getArtifactId())) {
return true;
}
}
}
return false;
}
private boolean isLocal(Artifact artifact) {
for (MavenProject localProject : this.localProjects) {
if (localProject.getArtifact().equals(artifact)) {
return true;
}
for (Artifact attachedArtifact : localProject.getAttachedArtifacts()) {
if (attachedArtifact.equals(artifact)) {
return true;
}
}
}
return false;
}
private String getFileName(Artifact artifact) {
StringBuilder sb = new StringBuilder();
sb.append(artifact.getArtifactId()).append("-").append(artifact.getBaseVersion());
String classifier = artifact.getClassifier();
if (classifier != null) {
sb.append("-").append(classifier);
}
sb.append(".").append(artifact.getArtifactHandler().getExtension());
return sb.toString();
}
/**
* {@link LibraryCoordinates} backed by a Maven {@link Artifact}.
*/
private static
|
ArtifactsLibraries
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/boot/models/foreigngenerator/Info.java
|
{
"start": 525,
"end": 1209
}
|
/**
 * Entity whose identifier is derived from its {@code owner} association via
 * Hibernate's {@code ForeignGenerator} ("property" parameter points at the owner).
 */
class ____ {
	@Id
	@GeneratedValue( generator = "foreign" )
	@GenericGenerator(
			name = "foreign",
			type = ForeignGenerator.class,
			parameters = @Parameter( name = "property", value = "owner" )
	)
	private Integer id;
	@Basic
	private String name;
	@ManyToOne
	private Thing owner;

	protected Info() {
		// for Hibernate use
	}

	public Info(Thing owner, String name) {
		this.name = name;
		this.owner = owner;
	}

	public Integer getId() {
		return id;
	}

	public Thing getOwner() {
		return owner;
	}

	public void setOwner(Thing owner) {
		this.owner = owner;
	}

	public String getName() {
		return name;
	}

	public void setName(String name) {
		this.name = name;
	}
}
|
Info
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYSCRecordFactory.java
|
{
"start": 1358,
"end": 1805
}
|
/** Verifies the protobuf record factory creates the PB-backed record implementation. */
class ____ {
    @Test
    public void testPbRecordFactory() {
        RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
        try {
            NodeHeartbeatRequest request = pbRecordFactory.newRecordInstance(NodeHeartbeatRequest.class);
            // The factory must hand back the protobuf implementation class.
            assertEquals(NodeHeartbeatRequestPBImpl.class, request.getClass());
        } catch (YarnRuntimeException e) {
            e.printStackTrace();
            // Fixed typo in the failure message ("crete" -> "create").
            fail("Failed to create record");
        }
    }
}
|
TestYSCRecordFactory
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/expressions/ExpressionParserException.java
|
{
"start": 1088,
"end": 1271
}
|
// Thin subclass of the API-level ExpressionParserException.
// NOTE(review): presumably kept as a module-local alias for backward
// compatibility — confirm against the deprecation history of this class.
class ____
        extends org.apache.flink.table.api.ExpressionParserException {
    public ExpressionParserException(String msg) {
        super(msg);
    }
}
|
ExpressionParserException
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/doublearrays/DoubleArrays_assertContainsAnyOf_Test.java
|
{
"start": 980,
"end": 1479
}
|
// Verifies that DoubleArrays.assertContainsAnyOf delegates to the internal
// Arrays implementation with the same arguments.
class ____ extends DoubleArraysBaseTest {
    private Arrays internalArrays;
    @BeforeEach
    @Override
    public void setUp() {
        super.setUp();
        // Swap in a mock Arrays helper so delegation can be verified.
        internalArrays = mock(Arrays.class);
        setArrays(internalArrays);
    }
    @Test
    void should_delegate_to_internal_Arrays() {
        arrays.assertContainsAnyOf(someInfo(), actual, new double[] { 1, 2, 3 });
        verify(internalArrays).assertContainsAnyOf(someInfo(), failures, actual, new double[] { 1, 2, 3 });
    }
}
|
DoubleArrays_assertContainsAnyOf_Test
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java
|
{
"start": 3958,
"end": 6026
}
|
// Response carrying the created trained-model assignment; supports both wire
// (StreamInput/Output) and XContent ({"assignment": {...}}) representations.
class ____ extends ActionResponse implements ToXContentObject {
    private static final ParseField ASSIGNMENT = new ParseField("assignment");
    private static final ConstructingObjectParser<Response, Void> PARSER = new ConstructingObjectParser<>(
        "create_trained_model_assignment_response",
        a -> new Response((TrainedModelAssignment) a[0])
    );
    static {
        PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> TrainedModelAssignment.fromXContent(p), ASSIGNMENT);
    }
    // Parses the XContent form produced by toXContent back into a Response.
    static Response fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }
    private final TrainedModelAssignment trainedModelAssignment;
    public Response(TrainedModelAssignment trainedModelAssignment) {
        this.trainedModelAssignment = trainedModelAssignment;
    }
    // Wire deserialization constructor.
    public Response(StreamInput in) throws IOException {
        this.trainedModelAssignment = new TrainedModelAssignment(in);
    }
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        trainedModelAssignment.writeTo(out);
    }
    public TrainedModelAssignment getTrainedModelAssignment() {
        return trainedModelAssignment;
    }
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(ASSIGNMENT.getPreferredName(), trainedModelAssignment);
        builder.endObject();
        return builder;
    }
    // Equality/hash are defined solely by the wrapped assignment.
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Response response = (Response) o;
        return Objects.equals(trainedModelAssignment, response.trainedModelAssignment);
    }
    @Override
    public int hashCode() {
        return Objects.hash(trainedModelAssignment);
    }
}
}
|
Response
|
java
|
quarkusio__quarkus
|
core/runtime/src/main/java/io/quarkus/runtime/logging/LogMetricsHandlerRecorder.java
|
{
"start": 478,
"end": 1858
}
|
/**
 * Recorder maintaining one counter per standard log level and exposing them
 * as "log.total" metrics; the counters are fed by {@code LogMetricsHandler}.
 */
class ____ {
    static final String METRIC_NAME = "log.total";
    static final String METRIC_DESCRIPTION = "Number of log events, per log level. Non-standard levels are counted with the lower standard level.";
    static final List<Level> STANDARD_LEVELS = Arrays.asList(Level.FATAL, Level.ERROR, Level.WARN, Level.INFO, Level.DEBUG,
            Level.TRACE);
    // Keyed by Level.intValue() so non-standard levels map onto a standard bucket.
    static final NavigableMap<Integer, LongAdder> COUNTERS = new TreeMap<>();

    /** Creates one counter per standard level. */
    public void initCounters() {
        for (Level level : STANDARD_LEVELS) {
            LongAdder counter = new LongAdder();
            // Use integer value to match any non-standard equivalent level
            COUNTERS.put(level.intValue(), counter);
        }
    }

    /** Registers a "log.total" counter, tagged with the level name, per standard level. */
    public Consumer<MetricsFactory> registerMetrics() {
        return new Consumer<MetricsFactory>() {
            @Override
            public void accept(MetricsFactory metricsFactory) {
                for (Level level : STANDARD_LEVELS) {
                    metricsFactory.builder(METRIC_NAME).description(METRIC_DESCRIPTION).tag("level", level.getName())
                            .buildCounter(COUNTERS.get(level.intValue())::sum);
                }
            }
        };
    }

    /** Returns the counting log handler wrapped in a runtime value. */
    public RuntimeValue<Optional<Handler>> getLogHandler() {
        // Fixed: use the parameterized constructor instead of a raw RuntimeValue,
        // avoiding an unchecked conversion warning.
        return new RuntimeValue<>(Optional.of(new LogMetricsHandler(COUNTERS)));
    }
}
|
LogMetricsHandlerRecorder
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/inject/JavaxInjectOnFinalFieldTest.java
|
{
"start": 2311,
"end": 2422
}
|
class ____ {}
/** Class has a final field that is not injectable. */
public
|
TestClass1
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/routebuilder/SpringTemplatedRouteTest.java
|
{
"start": 2331,
"end": 2541
}
|
    // Bean used by the templated-route test: a factory for new instances plus a
    // @Handler method returning the greeting prefix "-> Hello".
    class ____ {
        public MyScriptBean create() {
            return new MyScriptBean();
        }
        @Handler
        public String prefix() {
            return "-> Hello";
        }
    }
}
|
MyScriptBean
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/select/OracleSelectTest113.java
|
{
"start": 1042,
"end": 5120
}
|
// Round-trip test: parse a complex Oracle SELECT (nested aggregation, CASE
// expressions, string concatenation) and check the re-rendered SQL plus the
// schema statistics collected by the visitor.
class ____ extends OracleTest {
    public void test_0() throws Exception {
        String sql = "select zs,ybl,zs-ybl-wxbl wbl,wxbl,xzcs,xscs,dw dwid,pkg_unit.getDwmcById(dw)dwmc from ( select count(1)zs, count(case when l.tbbz = '1' then 1 end)ybl,count(case when l.tbbz = '3' then 1 end)wxbl,count(case when s.a_ajfl='10' and nvl(l.blsj,sysdate)-l.lrsj>1 and l.lrsj>to_date('20150713','yyyymmdd') and l.tbbz!='3' then 1end)xscs, count(case when s.a_ajfl='20' and nvl(l.blsj,sysdate)-l.lrsj>3 and l.lrsj>to_date('20150713','yyyymmdd') and l.tbbz!='3' then 1end)xzcs, substr(sys_dwdm, 0, 8)||'0000' dw from case_m_detail l,case_s_process s where l.a_ajbh=s.a_ajbh and l.sys_dwdm is not null and l.scbz = '0' and l.lrsj >= to_date('2018-01-17','yyyy-mm-dd') and l.lrsj <= to_date('2018-01-24 23:59','yyyy-mm-dd hh24:mi') and l.sys_dwdm like '331126%' group by substr(sys_dwdm, 0, 8)||'0000' ) order by dw";
        OracleStatementParser parser = new OracleStatementParser(sql, SQLParserFeature.KeepComments);
        List<SQLStatement> statementList = parser.parseStatementList();
        System.out.println(statementList.toString());
        assertEquals(1, statementList.size());
        SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(DbType.oracle);
        SQLStatement stmt = statementList.get(0);
        stmt.accept(visitor);
        {
            // Re-rendered SQL must match the canonical pretty-printed form exactly.
            String text = SQLUtils.toOracleString(stmt);
            assertEquals("SELECT zs, ybl, zs - ybl - wxbl AS wbl\n" +
                    "\t, wxbl, xzcs, xscs, dw AS dwid\n" +
                    "\t, pkg_unit.getDwmcById(dw) AS dwmc\n" +
                    "FROM (\n" +
                    "\tSELECT count(1) AS zs\n" +
                    "\t\t, count(CASE\n" +
                    "\t\t\tWHEN l.tbbz = '1' THEN 1\n" +
                    "\t\tEND) AS ybl\n" +
                    "\t\t, count(CASE\n" +
                    "\t\t\tWHEN l.tbbz = '3' THEN 1\n" +
                    "\t\tEND) AS wxbl\n" +
                    "\t\t, count(CASE\n" +
                    "\t\t\tWHEN s.a_ajfl = '10'\n" +
                    "\t\t\t\tAND nvl(l.blsj, SYSDATE) - l.lrsj > 1\n" +
                    "\t\t\t\tAND l.lrsj > to_date('20150713', 'yyyymmdd')\n" +
                    "\t\t\t\tAND l.tbbz != '3'\n" +
                    "\t\t\tTHEN 1\n" +
                    "\t\tEND) AS xscs\n" +
                    "\t\t, count(CASE\n" +
                    "\t\t\tWHEN s.a_ajfl = '20'\n" +
                    "\t\t\t\tAND nvl(l.blsj, SYSDATE) - l.lrsj > 3\n" +
                    "\t\t\t\tAND l.lrsj > to_date('20150713', 'yyyymmdd')\n" +
                    "\t\t\t\tAND l.tbbz != '3'\n" +
                    "\t\t\tTHEN 1\n" +
                    "\t\tEND) AS xzcs\n" +
                    "\t\t, substr(sys_dwdm, 0, 8) || '0000' AS dw\n" +
                    "\tFROM case_m_detail l, case_s_process s\n" +
                    "\tWHERE l.a_ajbh = s.a_ajbh\n" +
                    "\t\tAND l.sys_dwdm IS NOT NULL\n" +
                    "\t\tAND l.scbz = '0'\n" +
                    "\t\tAND l.lrsj >= to_date('2018-01-17', 'yyyy-mm-dd')\n" +
                    "\t\tAND l.lrsj <= to_date('2018-01-24 23:59', 'yyyy-mm-dd hh24:mi')\n" +
                    "\t\tAND l.sys_dwdm LIKE '331126%'\n" +
                    "\tGROUP BY substr(sys_dwdm, 0, 8) || '0000'\n" +
                    ")\n" +
                    "ORDER BY dw", text);
        }
        System.out.println("Tables : " + visitor.getTables());
        System.out.println("fields : " + visitor.getColumns());
        System.out.println("coditions : " + visitor.getConditions());
        System.out.println("relationships : " + visitor.getRelationships());
        System.out.println("orderBy : " + visitor.getOrderByColumns());
        // Collected schema statistics for the two joined tables.
        assertEquals(2, visitor.getTables().size());
        assertEquals(9, visitor.getColumns().size());
        assertEquals(11, visitor.getConditions().size());
        assertEquals(1, visitor.getRelationships().size());
        assertEquals(1, visitor.getOrderByColumns().size());
    }
}
|
OracleSelectTest113
|
java
|
apache__flink
|
flink-streaming-java/src/test/java/org/apache/flink/streaming/api/graph/StreamGraphGeneratorBatchExecutionTest.java
|
{
"start": 3502,
"end": 8060
}
|
class ____ {
@Test
void testShuffleMode() {
testGlobalStreamExchangeMode(
RuntimeExecutionMode.AUTOMATIC,
BatchShuffleMode.ALL_EXCHANGES_BLOCKING,
GlobalStreamExchangeMode.ALL_EDGES_BLOCKING);
testGlobalStreamExchangeMode(
RuntimeExecutionMode.STREAMING,
BatchShuffleMode.ALL_EXCHANGES_BLOCKING,
GlobalStreamExchangeMode.ALL_EDGES_PIPELINED);
testGlobalStreamExchangeMode(
RuntimeExecutionMode.BATCH,
BatchShuffleMode.ALL_EXCHANGES_PIPELINED,
GlobalStreamExchangeMode.ALL_EDGES_PIPELINED);
testGlobalStreamExchangeMode(
RuntimeExecutionMode.BATCH,
BatchShuffleMode.ALL_EXCHANGES_HYBRID_FULL,
GlobalStreamExchangeMode.ALL_EDGES_HYBRID_FULL);
testGlobalStreamExchangeMode(
RuntimeExecutionMode.BATCH,
BatchShuffleMode.ALL_EXCHANGES_HYBRID_SELECTIVE,
GlobalStreamExchangeMode.ALL_EDGES_HYBRID_SELECTIVE);
}
@Test
void testBatchJobType() {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
DataStreamSink<Integer> sink = addDummyPipeline(env);
StreamGraph graph = getStreamGraphInBatchMode(sink);
assertThat(graph.getJobType()).isEqualTo(JobType.BATCH);
}
@Test
void testManagedMemoryWeights() {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
SingleOutputStreamOperator<Integer> process =
env.fromData(1, 2).keyBy(Integer::intValue).process(DUMMY_PROCESS_FUNCTION);
DataStreamSink<Integer> sink = process.sinkTo(new DiscardingSink<>());
StreamGraph graph = getStreamGraphInBatchMode(sink);
StreamNode processNode = graph.getStreamNode(process.getId());
final Map<ManagedMemoryUseCase, Integer> expectedOperatorWeights = new HashMap<>();
expectedOperatorWeights.put(
ManagedMemoryUseCase.OPERATOR,
ExecutionOptions.SORTED_INPUTS_MEMORY.defaultValue().getMebiBytes());
assertThat(processNode.getManagedMemoryOperatorScopeUseCaseWeights())
.isEqualTo(expectedOperatorWeights);
assertThat(processNode.getManagedMemorySlotScopeUseCases())
.containsOnly(ManagedMemoryUseCase.STATE_BACKEND);
}
@Test
void testCustomManagedMemoryWeights() {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
SingleOutputStreamOperator<Integer> process =
env.fromData(1, 2).keyBy(Integer::intValue).process(DUMMY_PROCESS_FUNCTION);
DataStreamSink<Integer> sink = process.sinkTo(new DiscardingSink<>());
final Configuration configuration = new Configuration();
configuration.set(ExecutionOptions.SORTED_INPUTS_MEMORY, MemorySize.ofMebiBytes(42));
StreamGraph graph = getStreamGraphInBatchMode(sink, configuration);
StreamNode processNode = graph.getStreamNode(process.getId());
final Map<ManagedMemoryUseCase, Integer> expectedOperatorWeights = new HashMap<>();
expectedOperatorWeights.put(ManagedMemoryUseCase.OPERATOR, 42);
assertThat(processNode.getManagedMemoryOperatorScopeUseCaseWeights())
.isEqualTo(expectedOperatorWeights);
assertThat(processNode.getManagedMemorySlotScopeUseCases())
.containsOnly(ManagedMemoryUseCase.STATE_BACKEND);
}
@Test
void testOneInputTransformation() {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
SingleOutputStreamOperator<Integer> process =
env.fromData(1, 2).keyBy(Integer::intValue).process(DUMMY_PROCESS_FUNCTION);
DataStreamSink<Integer> sink = process.sinkTo(new DiscardingSink<>());
StreamGraph graph = getStreamGraphInBatchMode(sink);
StreamNode processNode = graph.getStreamNode(process.getId());
assertThat(processNode.getInputRequirements().get(0))
.isEqualTo(StreamConfig.InputRequirement.SORTED);
assertThat(processNode.getOperatorFactory().getChainingStrategy())
.isEqualTo(ChainingStrategy.HEAD);
assertThat(graph.getStateBackend()).isInstanceOf(BatchExecutionStateBackend.class);
// the provider is passed as a lambda therefore we cannot assert the
|
StreamGraphGeneratorBatchExecutionTest
|
java
|
elastic__elasticsearch
|
libs/lz4/src/test/java/org/elasticsearch/lz4/AbstractLZ4TestCase.java
|
{
"start": 1803,
"end": 2654
}
|
class ____ implements TesterBase<byte[]> {
@Override
public byte[] allocate(int length) {
return new byte[length];
}
@Override
public byte[] copyOf(byte[] array) {
return Arrays.copyOf(array, array.length);
}
@Override
public byte[] copyOf(byte[] data, int off, int len) {
return Arrays.copyOfRange(data, off, off + len);
}
@Override
public int maxCompressedLength(int len) {
return LZ4Utils.maxCompressedLength(len);
}
@Override
public void fill(byte[] instance, byte b) {
Arrays.fill(instance, b);
}
}
// Modified to remove redundant modifiers
|
ByteArrayTesterBase
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/builder/HashCodeBuilder.java
|
{
"start": 2323,
"end": 2382
}
|
class ____ code as follows:
* </p>
*
* <pre>
* public
|
write
|
java
|
apache__kafka
|
clients/src/test/java/org/apache/kafka/clients/consumer/internals/BackgroundEventHandlerTest.java
|
{
"start": 1654,
"end": 2995
}
|
class ____ {
private final BlockingQueue<BackgroundEvent> backgroundEventsQueue = new LinkedBlockingQueue<>();
@ParameterizedTest
@MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider")
public void testRecordBackgroundEventQueueSize(String groupName) {
try (Metrics metrics = new Metrics();
AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, groupName)) {
BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler(
backgroundEventsQueue,
new MockTime(0),
asyncConsumerMetrics);
// add event
backgroundEventHandler.add(new ErrorEvent(new Throwable()));
assertEquals(
1,
(double) metrics.metric(
metrics.metricName(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, groupName)
).metricValue()
);
// drain event
backgroundEventHandler.drainEvents();
assertEquals(
0,
(double) metrics.metric(
metrics.metricName(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, groupName)
).metricValue()
);
}
}
}
|
BackgroundEventHandlerTest
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/updatemethods/OrganizationMapper.java
|
{
"start": 450,
"end": 1339
}
|
interface ____ {
OrganizationMapper INSTANCE = Mappers.getMapper( OrganizationMapper.class );
@Mappings({
@Mapping(target = "type", constant = "commercial"),
@Mapping(target = "typeNr", constant = "5")
})
void toOrganizationEntity(OrganizationDto dto, @MappingTarget OrganizationEntity entity);
void toCompanyEntity(CompanyDto dto, @MappingTarget CompanyEntity entity);
@Mappings({
@Mapping(target = "employees", ignore = true ),
@Mapping(target = "secretaryToEmployee", ignore = true )
})
DepartmentEntity toDepartmentEntity(DepartmentDto dto);
@Mapping(target = "type", source = "type")
void toName(String type, @MappingTarget OrganizationTypeEntity entity);
@Mapping(target = "number", source = "number")
void toNumber(Integer number, @MappingTarget OrganizationTypeNrEntity entity);
}
|
OrganizationMapper
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/record/EndTransactionMarker.java
|
{
"start": 1161,
"end": 1455
}
|
class ____ the control record which is written to the log to indicate the completion
* of a transaction. The record key specifies the {@link ControlRecordType control type} and the
* value embeds information useful for write validation (for now, just the coordinator epoch).
*/
public
|
represents
|
java
|
quarkusio__quarkus
|
independent-projects/tools/devtools-common/src/main/java/io/quarkus/devtools/project/extensions/Extensions.java
|
{
"start": 334,
"end": 2779
}
|
class ____ {
private Extensions() {
}
public static ArtifactKey toKey(final Extension extension) {
return ArtifactKey.of(extension.getArtifact().getGroupId(),
extension.getArtifact().getArtifactId(),
extension.getArtifact().getClassifier(),
extension.getArtifact().getType());
}
public static ArtifactKey toKey(final Dependency dependency) {
return ArtifactKey.of(dependency.getGroupId(), dependency.getArtifactId(), dependency.getClassifier(),
dependency.getType());
}
public static Optional<Extension> findInList(Collection<Extension> list, final ArtifactKey key) {
return list.stream().filter(e -> Objects.equals(e.getArtifact().getKey(), key)).findFirst();
}
public static ArtifactCoords toCoords(final ArtifactKey k, final String version) {
return ArtifactCoords.of(k.getGroupId(), k.getArtifactId(), k.getClassifier(), k.getType(), version);
}
@Deprecated
public static ArtifactCoords toCoords(final Extension e) {
return e.getArtifact();
}
public static ArtifactCoords toCoords(final Dependency d, final String overrideVersion) {
return overrideVersion(toCoords(d), overrideVersion);
}
public static String toGAV(ArtifactCoords c) {
if (c.getVersion() == null) {
return toGA(c);
}
return c.getGroupId() + ":" + c.getArtifactId() + ":" + c.getVersion();
}
public static String toGA(ArtifactCoords c) {
return c.getGroupId() + ":" + c.getArtifactId();
}
public static String toGA(ArtifactKey c) {
return c.getGroupId() + ":" + c.getArtifactId();
}
public static String toGA(Extension e) {
return e.getArtifact().getGroupId() + ":" + e.getArtifact().getArtifactId();
}
public static ArtifactCoords stripVersion(final ArtifactCoords coords) {
return overrideVersion(coords, null);
}
public static ArtifactCoords overrideVersion(final ArtifactCoords coords, final String overrideVersion) {
return ArtifactCoords.of(coords.getGroupId(), coords.getArtifactId(), coords.getClassifier(), coords.getType(),
overrideVersion);
}
public static ArtifactCoords toCoords(final Dependency d) {
return ArtifactCoords.of(d.getGroupId(), d.getArtifactId(), d.getClassifier(), d.getType(), d.getVersion());
}
}
|
Extensions
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
|
{
"start": 21389,
"end": 24892
}
|
class ____
extends org.apache.hadoop.mapreduce.StatusReporter
implements Runnable, Reporter {
private TaskUmbilicalProtocol umbilical;
private InputSplit split = null;
private Progress taskProgress;
private Thread pingThread = null;
private boolean done = true;
private Object lock = new Object();
private volatile String diskLimitCheckStatus = null;
private Thread diskLimitCheckThread = null;
/**
* flag that indicates whether progress update needs to be sent to parent.
* If true, it has been set. If false, it has been reset.
* Using AtomicBoolean since we need an atomic read & reset method.
*/
private AtomicBoolean progressFlag = new AtomicBoolean(false);
@VisibleForTesting
public TaskReporter(Progress taskProgress,
TaskUmbilicalProtocol umbilical) {
this.umbilical = umbilical;
this.taskProgress = taskProgress;
}
// getters and setters for flag
void setProgressFlag() {
progressFlag.set(true);
}
boolean resetProgressFlag() {
return progressFlag.getAndSet(false);
}
public void setStatus(String status) {
taskProgress.setStatus(normalizeStatus(status, conf));
// indicate that progress update needs to be sent
setProgressFlag();
}
public void setProgress(float progress) {
// set current phase progress.
// This method assumes that task has phases.
taskProgress.phase().set(progress);
// indicate that progress update needs to be sent
setProgressFlag();
}
public float getProgress() {
return taskProgress.getProgress();
};
public void progress() {
// indicate that progress update needs to be sent
setProgressFlag();
}
public Counters.Counter getCounter(String group, String name) {
Counters.Counter counter = null;
if (counters != null) {
counter = counters.findCounter(group, name);
}
return counter;
}
public Counters.Counter getCounter(Enum<?> name) {
return counters == null ? null : counters.findCounter(name);
}
public void incrCounter(Enum key, long amount) {
if (counters != null) {
counters.incrCounter(key, amount);
}
setProgressFlag();
}
public void incrCounter(String group, String counter, long amount) {
if (counters != null) {
counters.incrCounter(group, counter, amount);
}
if(skipping && SkipBadRecords.COUNTER_GROUP.equals(group) && (
SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS.equals(counter) ||
SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS.equals(counter))) {
//if application reports the processed records, move the
//currentRecStartIndex to the next.
//currentRecStartIndex is the start index which has not yet been
//finished and is still in task's stomach.
for(int i=0;i<amount;i++) {
currentRecStartIndex = currentRecIndexIterator.next();
}
}
setProgressFlag();
}
public void setInputSplit(InputSplit split) {
this.split = split;
}
public InputSplit getInputSplit() throws UnsupportedOperationException {
if (split == null) {
throw new UnsupportedOperationException("Input only available on map");
} else {
return split;
}
}
/**
* exception thrown when the task exceeds some configured limits.
*/
public
|
TaskReporter
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/junit/jupiter/nested/ActiveProfilesTestMethodScopedExtensionContextNestedTests.java
|
{
"start": 3492,
"end": 3844
}
|
class ____ {
@Autowired
List<String> localStrings;
@Test
void test() {
assertThat(strings)
.isEqualTo(this.localStrings)
.containsExactlyInAnyOrder("X", "Y", "Z", "A3");
}
@Nested
@NestedTestConfiguration(INHERIT)
@ActiveProfiles(profiles = "2", inheritProfiles = false)
|
DoubleNestedWithOverriddenConfigTests
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-ec2/src/test/java/org/apache/camel/component/aws2/ec2/EC2OperationsTest.java
|
{
"start": 954,
"end": 3329
}
|
class ____ {
@Test
public void supportedOperationCount() {
assertEquals(11, AWS2EC2Operations.values().length);
}
@Test
public void valueOf() {
assertEquals(AWS2EC2Operations.createAndRunInstances, AWS2EC2Operations.valueOf("createAndRunInstances"));
assertEquals(AWS2EC2Operations.startInstances, AWS2EC2Operations.valueOf("startInstances"));
assertEquals(AWS2EC2Operations.stopInstances, AWS2EC2Operations.valueOf("stopInstances"));
assertEquals(AWS2EC2Operations.terminateInstances, AWS2EC2Operations.valueOf("terminateInstances"));
assertEquals(AWS2EC2Operations.describeInstances, AWS2EC2Operations.valueOf("describeInstances"));
assertEquals(AWS2EC2Operations.describeInstancesStatus, AWS2EC2Operations.valueOf("describeInstancesStatus"));
assertEquals(AWS2EC2Operations.rebootInstances, AWS2EC2Operations.valueOf("rebootInstances"));
assertEquals(AWS2EC2Operations.monitorInstances, AWS2EC2Operations.valueOf("monitorInstances"));
assertEquals(AWS2EC2Operations.unmonitorInstances, AWS2EC2Operations.valueOf("unmonitorInstances"));
assertEquals(AWS2EC2Operations.createTags, AWS2EC2Operations.valueOf("createTags"));
assertEquals(AWS2EC2Operations.deleteTags, AWS2EC2Operations.valueOf("deleteTags"));
}
@Test
public void testToString() {
assertEquals("createAndRunInstances", AWS2EC2Operations.createAndRunInstances.toString());
assertEquals("startInstances", AWS2EC2Operations.startInstances.toString());
assertEquals("stopInstances", AWS2EC2Operations.stopInstances.toString());
assertEquals("terminateInstances", AWS2EC2Operations.terminateInstances.toString());
assertEquals("describeInstances", AWS2EC2Operations.describeInstances.toString());
assertEquals("describeInstancesStatus", AWS2EC2Operations.describeInstancesStatus.toString());
assertEquals("rebootInstances", AWS2EC2Operations.rebootInstances.toString());
assertEquals("monitorInstances", AWS2EC2Operations.monitorInstances.toString());
assertEquals("unmonitorInstances", AWS2EC2Operations.unmonitorInstances.toString());
assertEquals("createTags", AWS2EC2Operations.createTags.toString());
assertEquals("deleteTags", AWS2EC2Operations.deleteTags.toString());
}
}
|
EC2OperationsTest
|
java
|
spring-projects__spring-boot
|
module/spring-boot-flyway/src/main/java/org/springframework/boot/flyway/autoconfigure/FlywayAutoConfiguration.java
|
{
"start": 19731,
"end": 20488
}
|
class ____ implements GenericConverter {
private static final Set<ConvertiblePair> CONVERTIBLE_TYPES;
static {
Set<ConvertiblePair> types = new HashSet<>(2);
types.add(new ConvertiblePair(String.class, MigrationVersion.class));
types.add(new ConvertiblePair(Number.class, MigrationVersion.class));
CONVERTIBLE_TYPES = Collections.unmodifiableSet(types);
}
@Override
public Set<ConvertiblePair> getConvertibleTypes() {
return CONVERTIBLE_TYPES;
}
@Override
public Object convert(@Nullable Object source, TypeDescriptor sourceType, TypeDescriptor targetType) {
String value = ObjectUtils.nullSafeToString(source);
return MigrationVersion.fromVersion(value);
}
}
static final
|
StringOrNumberToMigrationVersionConverter
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/Athena2EndpointBuilderFactory.java
|
{
"start": 1538,
"end": 27091
}
|
interface ____
extends
EndpointProducerBuilder {
default AdvancedAthena2EndpointBuilder advanced() {
return (AdvancedAthena2EndpointBuilder) this;
}
/**
* The Athena database to use.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param database the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder database(String database) {
doSetProperty("database", database);
return this;
}
/**
* Milliseconds before the next poll for query execution status. See the
* section Waiting for Query Completion and Retrying Failed Queries to
* learn more.
*
* The option is a: <code>long</code> type.
*
* Default: 2000
* Group: producer
*
* @param delay the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder delay(long delay) {
doSetProperty("delay", delay);
return this;
}
/**
* Milliseconds before the next poll for query execution status. See the
* section Waiting for Query Completion and Retrying Failed Queries to
* learn more.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 2000
* Group: producer
*
* @param delay the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder delay(String delay) {
doSetProperty("delay", delay);
return this;
}
/**
* Milliseconds before the first poll for query execution status. See
* the section Waiting for Query Completion and Retrying Failed Queries
* to learn more.
*
* The option is a: <code>long</code> type.
*
* Default: 1000
* Group: producer
*
* @param initialDelay the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder initialDelay(long initialDelay) {
doSetProperty("initialDelay", initialDelay);
return this;
}
/**
* Milliseconds before the first poll for query execution status. See
* the section Waiting for Query Completion and Retrying Failed Queries
* to learn more.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 1000
* Group: producer
*
* @param initialDelay the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder initialDelay(String initialDelay) {
doSetProperty("initialDelay", initialDelay);
return this;
}
/**
* Maximum number of times to attempt a query. Set to 1 to disable
* retries. See the section Waiting for Query Completion and Retrying
* Failed Queries to learn more.
*
* The option is a: <code>int</code> type.
*
* Default: 1
* Group: producer
*
* @param maxAttempts the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder maxAttempts(int maxAttempts) {
doSetProperty("maxAttempts", maxAttempts);
return this;
}
/**
* Maximum number of times to attempt a query. Set to 1 to disable
* retries. See the section Waiting for Query Completion and Retrying
* Failed Queries to learn more.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 1
* Group: producer
*
* @param maxAttempts the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder maxAttempts(String maxAttempts) {
doSetProperty("maxAttempts", maxAttempts);
return this;
}
/**
* Max number of results to return for the given operation (if supported
* by the Athena API endpoint). If not set, will use the Athena API
* default for the given operation.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: producer
*
* @param maxResults the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder maxResults(Integer maxResults) {
doSetProperty("maxResults", maxResults);
return this;
}
/**
* Max number of results to return for the given operation (if supported
* by the Athena API endpoint). If not set, will use the Athena API
* default for the given operation.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: producer
*
* @param maxResults the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder maxResults(String maxResults) {
doSetProperty("maxResults", maxResults);
return this;
}
/**
* Pagination token to use in the case where the response from the
* previous request was truncated.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param nextToken the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder nextToken(String nextToken) {
doSetProperty("nextToken", nextToken);
return this;
}
/**
* The Athena API function to call.
*
* The option is a:
* <code>org.apache.camel.component.aws2.athena.Athena2Operations</code>
* type.
*
* Default: startQueryExecution
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder operation(org.apache.camel.component.aws2.athena.Athena2Operations operation) {
doSetProperty("operation", operation);
return this;
}
/**
* The Athena API function to call.
*
* The option will be converted to a
* <code>org.apache.camel.component.aws2.athena.Athena2Operations</code>
* type.
*
* Default: startQueryExecution
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder operation(String operation) {
doSetProperty("operation", operation);
return this;
}
/**
* The location in Amazon S3 where query results are stored, such as
* s3://path/to/query/bucket/. Ensure this value ends with a forward
* slash.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param outputLocation the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder outputLocation(String outputLocation) {
doSetProperty("outputLocation", outputLocation);
return this;
}
/**
* How query results should be returned. One of StreamList (default -
* return a GetQueryResultsIterable that can page through all results),
* SelectList (returns at most 1000 rows at a time, plus a NextToken
* value as a header than can be used for manual pagination of results),
* S3Pointer (return an S3 path pointing to the results).
*
* The option is a:
* <code>org.apache.camel.component.aws2.athena.Athena2OutputType</code>
* type.
*
* Default: StreamList
* Group: producer
*
* @param outputType the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder outputType(org.apache.camel.component.aws2.athena.Athena2OutputType outputType) {
doSetProperty("outputType", outputType);
return this;
}
/**
* How query results should be returned. One of StreamList (default -
* return a GetQueryResultsIterable that can page through all results),
* SelectList (returns at most 1000 rows at a time, plus a NextToken
* value as a header than can be used for manual pagination of results),
* S3Pointer (return an S3 path pointing to the results).
*
* The option will be converted to a
* <code>org.apache.camel.component.aws2.athena.Athena2OutputType</code>
* type.
*
* Default: StreamList
* Group: producer
*
* @param outputType the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder outputType(String outputType) {
doSetProperty("outputType", outputType);
return this;
}
/**
* The unique ID identifying the query execution.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param queryExecutionId the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder queryExecutionId(String queryExecutionId) {
doSetProperty("queryExecutionId", queryExecutionId);
return this;
}
/**
* The SQL query to run. Except for simple queries, prefer setting this
* as the body of the Exchange or as a header using
* Athena2Constants.QUERY_STRING to avoid having to deal with URL
* encoding issues.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param queryString the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder queryString(String queryString) {
doSetProperty("queryString", queryString);
return this;
}
/**
* The region in which Athena client needs to work. When using this
* parameter, the configuration will expect the lowercase name of the
* region (for example ap-east-1).
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param region the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder region(String region) {
doSetProperty("region", region);
return this;
}
/**
* Reset the waitTimeout countdown in the event of a query retry. If set
* to true, potential max time spent waiting for queries is equal to
* waitTimeout x maxAttempts. See the section Waiting for Query
* Completion and Retrying Failed Queries to learn more.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: producer
*
* @param resetWaitTimeoutOnRetry the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder resetWaitTimeoutOnRetry(boolean resetWaitTimeoutOnRetry) {
doSetProperty("resetWaitTimeoutOnRetry", resetWaitTimeoutOnRetry);
return this;
}
/**
* Reset the waitTimeout countdown in the event of a query retry. If set
* to true, potential max time spent waiting for queries is equal to
* waitTimeout x maxAttempts. See the section Waiting for Query
* Completion and Retrying Failed Queries to learn more.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: producer
*
* @param resetWaitTimeoutOnRetry the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder resetWaitTimeoutOnRetry(String resetWaitTimeoutOnRetry) {
doSetProperty("resetWaitTimeoutOnRetry", resetWaitTimeoutOnRetry);
return this;
}
/**
* Optional comma separated list of error types to retry the query for.
* Use: 'retryable' to retry all retryable failure conditions (e.g.
* generic errors and resources exhausted), 'generic' to retry
* 'GENERIC_INTERNAL_ERROR' failures, 'exhausted' to retry queries that
* have exhausted resource limits, 'always' to always retry regardless
* of failure condition, or 'never' or null to never retry (default).
* See the section Waiting for Query Completion and Retrying Failed
* Queries to learn more.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: never
* Group: producer
*
* @param retry the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder retry(String retry) {
doSetProperty("retry", retry);
return this;
}
/**
* Optional max wait time in millis to wait for a successful query
* completion. See the section Waiting for Query Completion and Retrying
* Failed Queries to learn more.
*
* The option is a: <code>long</code> type.
*
* Default: 0
* Group: producer
*
* @param waitTimeout the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder waitTimeout(long waitTimeout) {
doSetProperty("waitTimeout", waitTimeout);
return this;
}
/**
* Optional max wait time in millis to wait for a successful query
* completion. See the section Waiting for Query Completion and Retrying
* Failed Queries to learn more.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 0
* Group: producer
*
* @param waitTimeout the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder waitTimeout(String waitTimeout) {
doSetProperty("waitTimeout", waitTimeout);
return this;
}
/**
* The workgroup to use for running the query.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param workGroup the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder workGroup(String workGroup) {
doSetProperty("workGroup", workGroup);
return this;
}
/**
* To define a proxy host when instantiating the Athena client.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyHost the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder proxyHost(String proxyHost) {
doSetProperty("proxyHost", proxyHost);
return this;
}
/**
* To define a proxy port when instantiating the Athena client.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: proxy
*
* @param proxyPort the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder proxyPort(Integer proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy port when instantiating the Athena client.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: proxy
*
* @param proxyPort the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder proxyPort(String proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy protocol when instantiating the Athena client.
*
* The option is a: <code>software.amazon.awssdk.core.Protocol</code>
* type.
*
* Default: HTTPS
* Group: proxy
*
* @param proxyProtocol the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder proxyProtocol(software.amazon.awssdk.core.Protocol proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* To define a proxy protocol when instantiating the Athena client.
*
* The option will be converted to a
* <code>software.amazon.awssdk.core.Protocol</code> type.
*
* Default: HTTPS
* Group: proxy
*
* @param proxyProtocol the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder proxyProtocol(String proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* Amazon AWS Access Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param accessKey the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder accessKey(String accessKey) {
doSetProperty("accessKey", accessKey);
return this;
}
/**
* The encryption type to use when storing query results in S3. One of
* SSE_S3, SSE_KMS, or CSE_KMS.
*
* The option is a:
* <code>software.amazon.awssdk.services.athena.model.EncryptionOption</code> type.
*
* Group: security
*
* @param encryptionOption the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder encryptionOption(software.amazon.awssdk.services.athena.model.EncryptionOption encryptionOption) {
doSetProperty("encryptionOption", encryptionOption);
return this;
}
/**
* The encryption type to use when storing query results in S3. One of
* SSE_S3, SSE_KMS, or CSE_KMS.
*
* The option will be converted to a
* <code>software.amazon.awssdk.services.athena.model.EncryptionOption</code> type.
*
* Group: security
*
* @param encryptionOption the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder encryptionOption(String encryptionOption) {
doSetProperty("encryptionOption", encryptionOption);
return this;
}
/**
* For SSE-KMS and CSE-KMS, this is the KMS key ARN or ID.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param kmsKey the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder kmsKey(String kmsKey) {
doSetProperty("kmsKey", kmsKey);
return this;
}
/**
* If using a profile credentials provider, this parameter will set the
* profile name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param profileCredentialsName the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder profileCredentialsName(String profileCredentialsName) {
doSetProperty("profileCredentialsName", profileCredentialsName);
return this;
}
/**
* Amazon AWS Secret Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param secretKey the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder secretKey(String secretKey) {
doSetProperty("secretKey", secretKey);
return this;
}
/**
* Amazon AWS Session Token used when the user needs to assume an IAM
* role.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param sessionToken the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder sessionToken(String sessionToken) {
doSetProperty("sessionToken", sessionToken);
return this;
}
/**
* Set whether the Athena client should expect to load credentials
* through a default credentials provider or to expect static
* credentials to be passed in.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useDefaultCredentialsProvider the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder useDefaultCredentialsProvider(boolean useDefaultCredentialsProvider) {
doSetProperty("useDefaultCredentialsProvider", useDefaultCredentialsProvider);
return this;
}
/**
* Set whether the Athena client should expect to load credentials
* through a default credentials provider or to expect static
* credentials to be passed in.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useDefaultCredentialsProvider the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder useDefaultCredentialsProvider(String useDefaultCredentialsProvider) {
doSetProperty("useDefaultCredentialsProvider", useDefaultCredentialsProvider);
return this;
}
/**
* Set whether the Athena client should expect to load credentials
* through a profile credentials provider.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useProfileCredentialsProvider the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder useProfileCredentialsProvider(boolean useProfileCredentialsProvider) {
doSetProperty("useProfileCredentialsProvider", useProfileCredentialsProvider);
return this;
}
/**
* Set whether the Athena client should expect to load credentials
* through a profile credentials provider.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useProfileCredentialsProvider the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder useProfileCredentialsProvider(String useProfileCredentialsProvider) {
doSetProperty("useProfileCredentialsProvider", useProfileCredentialsProvider);
return this;
}
/**
* Set whether the Athena client should expect to use Session
* Credentials. This is useful in a situation in which the user needs to
* assume an IAM role for doing operations in Athena.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useSessionCredentials the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder useSessionCredentials(boolean useSessionCredentials) {
doSetProperty("useSessionCredentials", useSessionCredentials);
return this;
}
/**
* Set whether the Athena client should expect to use Session
* Credentials. This is useful in a situation in which the user needs to
* assume an IAM role for doing operations in Athena.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useSessionCredentials the value to set
* @return the dsl builder
*/
default Athena2EndpointBuilder useSessionCredentials(String useSessionCredentials) {
doSetProperty("useSessionCredentials", useSessionCredentials);
return this;
}
}
/**
* Advanced builder for endpoint for the AWS Athena component.
*/
public
|
Athena2EndpointBuilder
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/IdentifierNameTest.java
|
{
"start": 5357,
"end": 5649
}
|
class ____ {
void run() {
try {
run();
} catch (StackOverflowError stack_overflow) {
}
}
}
""")
.addOutputLines(
"Test.java",
"""
|
Test
|
java
|
elastic__elasticsearch
|
x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/PutShutdownNodeAction.java
|
{
"start": 1094,
"end": 1407
}
|
class ____ extends ActionType<AcknowledgedResponse> {
public static final PutShutdownNodeAction INSTANCE = new PutShutdownNodeAction();
public static final String NAME = "cluster:admin/shutdown/create";
public PutShutdownNodeAction() {
super(NAME);
}
public static
|
PutShutdownNodeAction
|
java
|
dropwizard__dropwizard
|
dropwizard-jersey/src/test/java/io/dropwizard/jersey/optional/OptionalQueryParamResourceTest.java
|
{
"start": 3647,
"end": 4340
}
|
class ____ {
@GET
@Path("/message")
public String getMessage(@QueryParam("message") Optional<String> message) {
return message.orElse("Default Message");
}
@GET
@Path("/my-message")
public String getMyMessage(@QueryParam("mymessage") Optional<MyMessage> myMessage) {
return myMessage.orElse(new MyMessage("My Default Message")).getMessage();
}
@GET
@Path("/uuid")
public String getUUID(@QueryParam("uuid") Optional<UUIDParam> uuid) {
return uuid.orElse(new UUIDParam("d5672fa8-326b-40f6-bf71-d9dacf44bcdc")).get().toString();
}
}
}
|
OptionalQueryParamResource
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/HibernateOrmPersistenceUnitProviderHelper.java
|
{
"start": 195,
"end": 559
}
|
class ____ implements QuarkusPersistenceUnitProviderHelper {
@Override
public boolean isActive(String persistenceUnitName) {
var instance = Arc.container().select(SessionFactory.class, qualifier(persistenceUnitName));
return instance.isResolvable() && instance.getHandle().getBean().isActive();
}
}
|
HibernateOrmPersistenceUnitProviderHelper
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/config/datasource/ConfigNamedPUDatasourceActiveFalseStaticInjectionTest.java
|
{
"start": 1848,
"end": 2062
}
|
class ____ {
@Inject
@PersistenceUnit("mypu")
Session session;
@Transactional
public void useHibernate() {
session.find(MyEntity.class, 1L);
}
}
}
|
MyBean
|
java
|
google__guice
|
core/test/com/google/inject/CircularDependencyTest.java
|
{
"start": 18943,
"end": 20494
}
|
class ____ implements Scope {
private static Map<Key<?>, Object> cache = Maps.newHashMap();
@Override
public <T> Provider<T> scope(final Key<T> key, final Provider<T> unscoped) {
return new Provider<T>() {
@Override
@SuppressWarnings("unchecked")
public T get() {
if (!cache.containsKey(key)) {
T t = unscoped.get();
if (Scopes.isCircularProxy(t)) {
return t;
}
cache.put(key, t);
}
return (T) cache.get(key);
}
};
}
}
public void testDisabledNonConstructorCircularDependencies() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
binder().disableCircularProxies();
}
});
try {
injector.getInstance(K.class);
fail("expected exception");
} catch (ProvisionException expected) {
assertContains(
expected.getMessage(),
"Found a circular dependency involving CircularDependencyTest$K"
+ ", and circular dependencies are disabled.");
}
try {
injector.getInstance(L.class);
fail("expected exception");
} catch (ProvisionException expected) {
assertContains(
expected.getMessage(),
"Found a circular dependency involving CircularDependencyTest$L"
+ ", and circular dependencies are disabled.");
}
}
static
|
BasicSingleton
|
java
|
qos-ch__slf4j
|
jcl-over-slf4j/src/test/java/org/apache/commons/logging/test/InvokeJCLTest.java
|
{
"start": 4075,
"end": 4358
}
|
class ____ {
private final String msg;
int invokedCount = 0;
TestMessage(String msg) {
this.msg = msg;
}
@Override
public String toString() {
invokedCount++;
return msg;
}
}
}
|
TestMessage
|
java
|
spring-projects__spring-boot
|
module/spring-boot-validation/src/test/java/org/springframework/boot/validation/autoconfigure/ValidatorAdapterTests.java
|
{
"start": 5652,
"end": 6504
}
|
class ____ implements SmartValidator {
private final SmartValidator delegate;
Wrapper(SmartValidator delegate) {
this.delegate = delegate;
}
@Override
public boolean supports(Class<?> type) {
return this.delegate.supports(type);
}
@Override
public void validate(Object target, Errors errors) {
this.delegate.validate(target, errors);
}
@Override
public void validate(Object target, Errors errors, Object... validationHints) {
this.delegate.validate(target, errors, validationHints);
}
@Override
@SuppressWarnings("unchecked")
public <T> @Nullable T unwrap(@Nullable Class<T> type) {
if (type != null && type.isInstance(this.delegate)) {
return (T) this.delegate;
}
return this.delegate.unwrap(type);
}
}
}
@Configuration(proxyBeanMethods = false)
static
|
Wrapper
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/strings/Strings_assertMatches_Pattern_Test.java
|
{
"start": 1689,
"end": 4482
}
|
class ____ extends StringsBaseTest {
private String actual = "Yoda";
@Test
void should_throw_error_if_Pattern_is_null() {
assertThatNullPointerException().isThrownBy(() -> {
Pattern pattern = null;
strings.assertMatches(someInfo(), actual, pattern);
}).withMessage(regexPatternIsNull());
}
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> strings.assertMatches(someInfo(), null, matchAnything()))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_does_not_match_Pattern() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> strings.assertMatches(someInfo(), actual,
Pattern.compile("Luke")))
.withMessage(shouldMatch(actual, "Luke").create());
}
@Test
void should_pass_if_actual_matches_Pattern() {
strings.assertMatches(someInfo(), actual, Pattern.compile("Yod.*"));
}
@Test
void should_throw_error_if_Pattern_is_null_whatever_custom_comparison_strategy_is() {
assertThatNullPointerException().isThrownBy(() -> {
Pattern pattern = null;
stringsWithCaseInsensitiveComparisonStrategy.assertMatches(someInfo(), actual, pattern);
}).withMessage(regexPatternIsNull());
}
@Test
void should_fail_if_actual_is_null_whatever_custom_comparison_strategy_is() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> stringsWithCaseInsensitiveComparisonStrategy.assertMatches(someInfo(),
null,
matchAnything()))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_does_not_match_Pattern_whatever_custom_comparison_strategy_is() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> stringsWithCaseInsensitiveComparisonStrategy.assertMatches(info, actual,
Pattern.compile("Luke")));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldMatch(actual, "Luke"));
}
@Test
void should_pass_if_actual_matches_Pattern_whatever_custom_comparison_strategy_is() {
stringsWithCaseInsensitiveComparisonStrategy.assertMatches(someInfo(), actual, Pattern.compile("Yod.*"));
}
}
|
Strings_assertMatches_Pattern_Test
|
java
|
apache__camel
|
components/camel-netty/src/test/java/org/apache/camel/component/netty/NettyGlobalSSLContextParametersTest.java
|
{
"start": 1652,
"end": 4211
}
|
class ____ extends BaseNettyTest {
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
KeyStoreParameters ksp = new KeyStoreParameters();
ksp.setResource(this.getClass().getClassLoader().getResource("keystore.jks").toString());
ksp.setPassword("changeit");
KeyManagersParameters kmp = new KeyManagersParameters();
kmp.setKeyPassword("changeit");
kmp.setKeyStore(ksp);
TrustManagersParameters tmp = new TrustManagersParameters();
tmp.setKeyStore(ksp);
// NOTE: Needed since the client uses a loose trust configuration when no ssl context
// is provided. We turn on WANT client-auth to prefer using authentication
SSLContextServerParameters scsp = new SSLContextServerParameters();
scsp.setClientAuthentication(ClientAuthentication.WANT.name());
SSLContextParameters sslContextParameters = new SSLContextParameters();
sslContextParameters.setKeyManagers(kmp);
sslContextParameters.setTrustManagers(tmp);
sslContextParameters.setServerParameters(scsp);
context.setSSLContextParameters(sslContextParameters);
((SSLContextParametersAware) context.getComponent("netty")).setUseGlobalSslContextParameters(true);
return context;
}
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testSSLInOutWithNettyConsumer() throws Exception {
context.addRoutes(new RouteBuilder() {
public void configure() {
from("netty:tcp://localhost:{{port}}?sync=true&ssl=true")
.process(new Processor() {
public void process(Exchange exchange) {
exchange.getMessage().setBody(
"When You Go Home, Tell Them Of Us And Say, For Your Tomorrow, We Gave Our Today.");
}
});
}
});
context.start();
String response = template.requestBody(
"netty:tcp://localhost:{{port}}?sync=true&ssl=true",
"Epitaph in Kohima, India marking the WWII Battle of Kohima and Imphal, Burma Campaign - Attributed to John Maxwell Edmonds",
String.class);
assertEquals("When You Go Home, Tell Them Of Us And Say, For Your Tomorrow, We Gave Our Today.", response);
}
}
|
NettyGlobalSSLContextParametersTest
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/map/WriteMode.java
|
{
"start": 689,
"end": 1105
}
|
enum ____ {
/**
* In write behind mode all data written in map object
* also written using MapWriter in asynchronous mode.
*/
WRITE_BEHIND,
/**
* In write through mode all write operations for map object
* are synchronized with MapWriter write operations.
* If MapWriter throws an error then it will be re-thrown to Map operation caller.
*/
WRITE_THROUGH
}
|
WriteMode
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/RequestMappingHandlerMappingTests.java
|
{
"start": 21701,
"end": 21859
}
|
class ____ {
@GetMapping("/{id}")
public Principal getUser() {
return mock();
}
}
@RestController
@HttpExchange("/exchange")
static
|
UserController
|
java
|
apache__camel
|
components/camel-openapi-java/src/test/java/org/apache/camel/openapi/model/SampleComplexResponseTypeWithSchemaAnnotation.java
|
{
"start": 1025,
"end": 1782
}
|
class ____ {
@JsonProperty(required = true)
private String responseField1 = "Response Field 1";
private String responseField2 = "Response Field 2";
private String[] arrayOfStrings;
private Month month;
private InnerClass innerClass;
public String getResponseField1() {
return responseField1;
}
public String getResponseField2() {
return responseField2;
}
@JsonProperty(required = true)
public String[] getArrayOfStrings() {
return arrayOfStrings;
}
public Month getMonth() {
return month;
}
public InnerClass getInnerClass() {
return innerClass;
}
@Schema(name = "responseInner")
public static
|
SampleComplexResponseTypeWithSchemaAnnotation
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/nullness/FieldMissingNullableTest.java
|
{
"start": 2542,
"end": 3248
}
|
class ____ {
private String message;
public void setMessage(String message) {
if (message == null) {
// BUG: Diagnostic contains: @Nullable
this.message = message;
} else {
this.message = "hello";
}
}
}
""")
.doTest();
}
@Test
public void maybeNullAssignment() {
createCompilationTestHelper()
.addSourceLines(
"com/google/errorprone/bugpatterns/nullness/FieldMissingNullTest.java",
"""
package com.google.errorprone.bugpatterns.nullness;
public
|
FieldMissingNullTest
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_2901/Issue2901Test.java
|
{
"start": 380,
"end": 557
}
|
class ____ {
@ProcessorTest
@WithClasses( { Source.class, Target.class, ConditionWithTargetTypeOnCollectionMapper.class } )
void shouldCompile() {
}
}
|
Issue2901Test
|
java
|
spring-projects__spring-framework
|
spring-jdbc/src/test/java/org/springframework/jdbc/support/rowset/ResultSetWrappingRowSetTests.java
|
{
"start": 1271,
"end": 8002
}
|
class ____ {
private final ResultSet resultSet = mock();
private final ResultSetWrappingSqlRowSet rowSet = new ResultSetWrappingSqlRowSet(resultSet);
@Test
void testGetBigDecimalInt() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getBigDecimal", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getBigDecimal", int.class);
doTest(rset, rowset, 1, BigDecimal.ONE);
}
@Test
void testGetBigDecimalString() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getBigDecimal", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getBigDecimal", String.class);
doTest(rset, rowset, "test", BigDecimal.ONE);
}
@Test
void testGetStringInt() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getString", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getString", int.class);
doTest(rset, rowset, 1, "test");
}
@Test
void testGetStringString() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getString", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getString", String.class);
doTest(rset, rowset, "test", "test");
}
@Test
void testGetTimestampInt() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getTimestamp", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getTimestamp", int.class);
doTest(rset, rowset, 1, new Timestamp(1234L));
}
@Test
void testGetTimestampString() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getTimestamp", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getTimestamp", String.class);
doTest(rset, rowset, "test", new Timestamp(1234L));
}
@Test
void testGetDateInt() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getDate", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getDate", int.class);
doTest(rset, rowset, 1, new Date(1234L));
}
@Test
void testGetDateString() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getDate", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getDate", String.class);
doTest(rset, rowset, "test", new Date(1234L));
}
@Test
void testGetTimeInt() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getTime", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getTime", int.class);
doTest(rset, rowset, 1, new Time(1234L));
}
@Test
void testGetTimeString() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getTime", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getTime", String.class);
doTest(rset, rowset, "test", new Time(1234L));
}
@Test
void testGetObjectInt() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getObject", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getObject", int.class);
doTest(rset, rowset, 1, new Object());
}
@Test
void testGetObjectString() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getObject", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getObject", String.class);
doTest(rset, rowset, "test", new Object());
}
@Test
void testGetIntInt() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getInt", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getInt", int.class);
doTest(rset, rowset, 1, 1);
}
@Test
void testGetIntString() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getInt", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getInt", String.class);
doTest(rset, rowset, "test", 1);
}
@Test
void testGetFloatInt() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getFloat", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getFloat", int.class);
doTest(rset, rowset, 1, 1.0f);
}
@Test
void testGetFloatString() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getFloat", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getFloat", String.class);
doTest(rset, rowset, "test", 1.0f);
}
@Test
void testGetDoubleInt() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getDouble", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getDouble", int.class);
doTest(rset, rowset, 1, 1.0d);
}
@Test
void testGetDoubleString() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getDouble", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getDouble", String.class);
doTest(rset, rowset, "test", 1.0d);
}
@Test
void testGetLongInt() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getLong", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getLong", int.class);
doTest(rset, rowset, 1, 1L);
}
@Test
void testGetLongString() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getLong", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getLong", String.class);
doTest(rset, rowset, "test", 1L);
}
@Test
void testGetBooleanInt() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getBoolean", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getBoolean", int.class);
doTest(rset, rowset, 1, true);
}
@Test
void testGetBooleanString() throws Exception {
Method rset = ResultSet.class.getDeclaredMethod("getBoolean", int.class);
Method rowset = ResultSetWrappingSqlRowSet.class.getDeclaredMethod("getBoolean", String.class);
doTest(rset, rowset, "test", true);
}
private void doTest(Method rsetMethod, Method rowsetMethod, Object arg, Object ret) throws Exception {
if (arg instanceof String) {
given(resultSet.findColumn((String) arg)).willReturn(1);
given(rsetMethod.invoke(resultSet, 1)).willReturn(ret).willThrow(new SQLException("test"));
}
else {
given(rsetMethod.invoke(resultSet, arg)).willReturn(ret).willThrow(new SQLException("test"));
}
rowsetMethod.invoke(rowSet, arg);
assertThatExceptionOfType(InvocationTargetException.class)
.isThrownBy(() -> rowsetMethod.invoke(rowSet, arg))
.satisfies(ex -> assertThat(ex.getTargetException()).isExactlyInstanceOf(InvalidResultSetAccessException.class));
}
}
|
ResultSetWrappingRowSetTests
|
java
|
quarkusio__quarkus
|
extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/QuteProcessorTest.java
|
{
"start": 591,
"end": 2256
}
|
class ____ {
@Test
public void testTemplateDataIgnorePattern() {
List<String> names = List.of("foo", "bar");
Pattern p = Pattern.compile(QuteProcessor.buildIgnorePattern(names));
// Ignore "baz" and "getFoo"
assertTrue(p.matcher("baz").matches());
assertTrue(p.matcher("getFoo").matches());
// Do not ignore "foo" and "bar"
for (String name : names) {
assertFalse(p.matcher(name).matches());
}
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> QuteProcessor.buildIgnorePattern(List.of()));
}
@Test
public void testCollectNamespaceExpressions() {
Template template = Engine.builder().build().parse("{msg:hello} {msg2:hello_alpha} {foo:baz.get(foo:bar)}");
TemplateAnalysis analysis = new TemplateAnalysis("foo", template, null);
Set<Expression> msg = QuteProcessor.collectNamespaceExpressions(analysis, "msg");
assertEquals(1, msg.size());
assertEquals("msg:hello", msg.iterator().next().toOriginalString());
Set<Expression> msg2 = QuteProcessor.collectNamespaceExpressions(analysis, "msg2");
assertEquals(1, msg2.size());
assertEquals("msg2:hello_alpha", msg2.iterator().next().toOriginalString());
Set<Expression> foo = QuteProcessor.collectNamespaceExpressions(analysis, "foo");
assertEquals(2, foo.size());
for (Expression fooExpr : foo) {
assertTrue(
fooExpr.toOriginalString().equals("foo:bar") || fooExpr.toOriginalString().equals("foo:baz.get(foo:bar)"));
}
}
}
|
QuteProcessorTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/access/NestedEmbeddableDefaultAccessTests.java
|
{
"start": 3738,
"end": 3960
}
|
class ____ {
@Convert( converter = SillyConverter.class )
@Column( name = "outer_data" )
private String outerData;
@Embedded
private NestedEmbeddable nestedEmbeddable;
}
@Embeddable
public static
|
OuterEmbeddable
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/DifferentNameButSameTest.java
|
{
"start": 6731,
"end": 7067
}
|
class ____ implements A<String> {}
B.D b();
C.D c();
}
""")
.expectUnchanged()
.doTest();
}
@Test
public void typesDefinedWithinSameFileIgnored() {
helper
.addInputLines(
"Test.java",
"""
package pkg;
|
C
|
java
|
processing__processing4
|
core/src/processing/data/FloatDict.java
|
{
"start": 189,
"end": 366
}
|
class ____ use a <b>String</b> as a lookup for a float value. String "keys"
* are associated with floating-point values.
*
* @webref data:composite
* @webBrief A simple table
|
to
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
|
{
"start": 1755,
"end": 4771
}
|
class ____ extends AbstractProviderService
implements DockerKeys {
@Override
public void processArtifact(AbstractLauncher launcher,
ComponentInstance compInstance, SliderFileSystem fileSystem,
Service service, ContainerLaunchService.ComponentLaunchContext
compLaunchCtx) throws IOException{
launcher.setYarnDockerMode(true);
launcher.setDockerImage(compLaunchCtx.getArtifact().getId());
launcher.setDockerNetwork(compLaunchCtx.getConfiguration()
.getProperty(DOCKER_NETWORK));
launcher.setDockerHostname(compInstance.getHostname());
launcher.setRunPrivilegedContainer(
compLaunchCtx.isRunPrivilegedContainer());
}
/**
* Check if system is default to disable docker override or
* user requested a Docker container with ENTRY_POINT support.
*
* @param compLaunchContext - launch context for the component.
* @return true if Docker launch command override is disabled
*/
private boolean checkUseEntryPoint(
ContainerLaunchService.ComponentLaunchContext compLaunchContext) {
boolean overrideDisable = false;
String overrideDisableKey = Environment.
YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE.
name();
String overrideDisableValue = (
compLaunchContext.getConfiguration().getEnv(overrideDisableKey)
!= null) ?
compLaunchContext.getConfiguration().getEnv(
overrideDisableKey) : System.getenv(overrideDisableKey);
overrideDisable = Boolean.parseBoolean(overrideDisableValue);
return overrideDisable;
}
@Override
public void buildContainerLaunchCommand(AbstractLauncher launcher,
Service service, ComponentInstance instance,
SliderFileSystem fileSystem, Configuration yarnConf, Container container,
ContainerLaunchService.ComponentLaunchContext compLaunchContext,
Map<String, String> tokensForSubstitution)
throws IOException, SliderException {
boolean useEntryPoint = checkUseEntryPoint(compLaunchContext);
if (useEntryPoint) {
String launchCommand = compLaunchContext.getLaunchCommand();
if (!StringUtils.isEmpty(launchCommand)) {
if(launchCommand.contains(" ")) {
// convert space delimiter command to exec format
launchCommand = ProviderUtils
.replaceSpacesWithDelimiter(launchCommand, ",");
}
launcher.addCommand(launchCommand);
}
} else {
// substitute launch command
String launchCommand = compLaunchContext.getLaunchCommand();
// docker container may have empty commands
if (!StringUtils.isEmpty(launchCommand)) {
launchCommand = ProviderUtils
.substituteStrWithTokens(launchCommand, tokensForSubstitution);
CommandLineBuilder operation = new CommandLineBuilder();
operation.add(launchCommand);
operation.addOutAndErrFiles(OUT_FILE, ERR_FILE);
launcher.addCommand(operation.build());
}
}
}
}
|
DockerProviderService
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/spi/endpoint/EndpointResolver.java
|
{
"start": 1081,
"end": 2609
}
|
interface ____<A extends Address, S, D, E> {
/**
* Try to cast the {@code address} to an address instance that can be resolved by this resolver instance.
*
* @param address the address to cast
* @return the address or {@code null} when the {@code address} cannot be resolved by this resolver
*/
A tryCast(Address address);
/**
* Returns the socket address of a given endpoint {@code server}.
*
* @param server the endpoint server
* @return the server socket address
*/
SocketAddress addressOf(S server);
/**
* Returns the known properties of a given {@code server}.
*
* @param server the endpoint
* @return the properties as a JSON object
*/
default JsonObject propertiesOf(S server) {
return new JsonObject();
}
/**
* Resolve an address to the resolver state for this name.
*
* @param address the address to resolve
* @param builder the endpoint builder
* @return a future notified with the result
*/
Future<D> resolve(A address, EndpointBuilder<E, S> builder);
/**
* Return the current endpoint visible by the resolver.
*
* @param state the resolver state
* @return the list of endpoints
*/
E endpoint(D state);
/**
* Check the state validity.
*
* @param state resolver state
* @return the state validity
*/
boolean isValid(D state);
/**
* Dispose the state.
*
* @param data the state
*/
void dispose(D data);
/**
* Close this resolver.
*/
void close();
}
|
EndpointResolver
|
java
|
grpc__grpc-java
|
services/src/main/java/io/grpc/protobuf/services/HealthCheckingLoadBalancerUtil.java
|
{
"start": 1086,
"end": 1408
}
|
class ____ {
private HealthCheckingLoadBalancerUtil() {
}
/**
* Creates a health-checking-capable LoadBalancer. This method is used to implement
* health-checking-capable {@link io.grpc.LoadBalancer.Factory}s, which will typically written
* this way:
*
* <pre>
* public
|
HealthCheckingLoadBalancerUtil
|
java
|
elastic__elasticsearch
|
modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorTests.java
|
{
"start": 854,
"end": 11592
}
|
class ____ extends ESTestCase {
public void testUriParts() throws Exception {
// simple URI
testUriParsing("http://www.google.com", Map.of("scheme", "http", "domain", "www.google.com", "path", ""));
// custom port
testUriParsing("http://www.google.com:88", Map.of("scheme", "http", "domain", "www.google.com", "path", "", "port", 88));
// file
testUriParsing(
"http://www.google.com:88/google.png",
Map.of("scheme", "http", "domain", "www.google.com", "extension", "png", "path", "/google.png", "port", 88)
);
// fragment
testUriParsing(
"https://www.google.com:88/foo#bar",
Map.of("scheme", "https", "domain", "www.google.com", "fragment", "bar", "path", "/foo", "port", 88)
);
// path, extension
testUriParsing(
"https://www.google.com:88/foo.jpg",
Map.of("scheme", "https", "domain", "www.google.com", "path", "/foo.jpg", "extension", "jpg", "port", 88)
);
// query
testUriParsing(
"https://www.google.com:88/foo?key=val",
Map.of("scheme", "https", "domain", "www.google.com", "path", "/foo", "query", "key=val", "port", 88)
);
// user_info
testUriParsing(
"https://user:pw@www.google.com:88/foo",
Map.of(
"scheme",
"https",
"domain",
"www.google.com",
"path",
"/foo",
"port",
88,
"user_info",
"user:pw",
"username",
"user",
"password",
"pw"
)
);
// user_info without password
testUriParsing(
"https://user:@www.google.com:88/foo",
Map.of(
"scheme",
"https",
"domain",
"www.google.com",
"path",
"/foo",
"port",
88,
"user_info",
"user:",
"username",
"user",
"password",
""
)
);
// everything!
testUriParsing(
"https://user:pw@testing.google.com:8080/foo/bar?foo1=bar1&foo2=bar2#anchorVal",
Map.of(
"scheme",
"https",
"domain",
"testing.google.com",
"fragment",
"anchorVal",
"path",
"/foo/bar",
"port",
8080,
"username",
"user",
"password",
"pw",
"user_info",
"user:pw",
"query",
"foo1=bar1&foo2=bar2"
)
);
// non-http schemes
testUriParsing(
"ftp://ftp.is.co.za/rfc/rfc1808.txt",
Map.of("scheme", "ftp", "path", "/rfc/rfc1808.txt", "extension", "txt", "domain", "ftp.is.co.za")
);
testUriParsing("telnet://192.0.2.16:80/", Map.of("scheme", "telnet", "path", "/", "port", 80, "domain", "192.0.2.16"));
testUriParsing(
"ldap://[2001:db8::7]/c=GB?objectClass?one",
Map.of("scheme", "ldap", "path", "/c=GB", "query", "objectClass?one", "domain", "[2001:db8::7]")
);
// keep original
testUriParsing(
true,
false,
"http://www.google.com:88/foo#bar",
Map.of("scheme", "http", "domain", "www.google.com", "fragment", "bar", "path", "/foo", "port", 88)
);
// remove if successful
testUriParsing(
false,
true,
"http://www.google.com:88/foo#bar",
Map.of("scheme", "http", "domain", "www.google.com", "fragment", "bar", "path", "/foo", "port", 88)
);
}
public void testUrlWithCharactersNotToleratedByUri() throws Exception {
testUriParsing(
"http://www.google.com/path with spaces",
Map.of("scheme", "http", "domain", "www.google.com", "path", "/path with spaces")
);
testUriParsing(
"https://user:pw@testing.google.com:8080/foo with space/bar?foo1=bar1&foo2=bar2#anchorVal",
Map.of(
"scheme",
"https",
"domain",
"testing.google.com",
"fragment",
"anchorVal",
"path",
"/foo with space/bar",
"port",
8080,
"username",
"user",
"password",
"pw",
"user_info",
"user:pw",
"query",
"foo1=bar1&foo2=bar2"
)
);
}
public void testDotPathWithoutExtension() throws Exception {
testUriParsing(
"https://www.google.com/path.withdot/filenamewithoutextension",
Map.of("scheme", "https", "domain", "www.google.com", "path", "/path.withdot/filenamewithoutextension")
);
}
public void testDotPathWithExtension() throws Exception {
testUriParsing(
"https://www.google.com/path.withdot/filenamewithextension.txt",
Map.of("scheme", "https", "domain", "www.google.com", "path", "/path.withdot/filenamewithextension.txt", "extension", "txt")
);
}
/**
* This test verifies that we return an empty extension instead of <code>null</code> if the URI ends with a period. This is probably
* not behaviour we necessarily want to keep forever, but this test ensures that we're conscious about changing that behaviour.
*/
public void testEmptyExtension() throws Exception {
testUriParsing(
"https://www.google.com/foo/bar.",
Map.of("scheme", "https", "domain", "www.google.com", "path", "/foo/bar.", "extension", "")
);
}
public void testRemoveIfSuccessfulDoesNotRemoveTargetField() throws Exception {
String field = "field";
UriPartsProcessor processor = new UriPartsProcessor(null, null, field, field, true, false, false);
Map<String, Object> source = new HashMap<>();
source.put(field, "http://www.google.com");
IngestDocument input = TestIngestDocument.withDefaultVersion(source);
IngestDocument output = processor.execute(input);
Map<String, Object> expectedSourceAndMetadata = new HashMap<>();
expectedSourceAndMetadata.put(field, Map.of("scheme", "http", "domain", "www.google.com", "path", ""));
for (Map.Entry<String, Object> entry : expectedSourceAndMetadata.entrySet()) {
assertThat(output.getSourceAndMetadata(), hasEntry(entry.getKey(), entry.getValue()));
}
}
public void testInvalidUri() {
String uri = "not:\\/_a_valid_uri";
UriPartsProcessor processor = new UriPartsProcessor(null, null, "field", "url", true, false, false);
Map<String, Object> source = new HashMap<>();
source.put("field", uri);
IngestDocument input = TestIngestDocument.withDefaultVersion(source);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(input));
assertThat(e.getMessage(), containsString("unable to parse URI [" + uri + "]"));
}
public void testNullValue() {
Map<String, Object> source = new HashMap<>();
source.put("field", null);
IngestDocument input = TestIngestDocument.withDefaultVersion(source);
UriPartsProcessor processor = new UriPartsProcessor(null, null, "field", "url", true, false, false);
expectThrows(NullPointerException.class, () -> processor.execute(input));
}
public void testMissingField() {
Map<String, Object> source = new HashMap<>();
IngestDocument input = TestIngestDocument.withDefaultVersion(source);
UriPartsProcessor processor = new UriPartsProcessor(null, null, "field", "url", true, false, false);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(input));
assertThat(e.getMessage(), containsString("field [field] not present as part of path [field]"));
}
public void testIgnoreMissingField() throws Exception {
Map<String, Object> source = new HashMap<>();
// Adding a random field, so we can check the doc is leaved unchanged.
source.put(randomIdentifier(), randomIdentifier());
IngestDocument input = TestIngestDocument.withDefaultVersion(source);
Map<String, Object> expectedSourceAndMetadata = Map.copyOf(input.getSourceAndMetadata());
UriPartsProcessor processor = new UriPartsProcessor(null, null, "field", "url", true, false, true);
IngestDocument output = processor.execute(input);
assertThat(output.getSourceAndMetadata().entrySet(), hasSize(expectedSourceAndMetadata.size()));
for (Map.Entry<String, Object> entry : expectedSourceAndMetadata.entrySet()) {
assertThat(output.getSourceAndMetadata(), hasEntry(entry.getKey(), entry.getValue()));
}
}
private void testUriParsing(String uri, Map<String, Object> expectedValues) throws Exception {
testUriParsing(false, false, uri, expectedValues);
}
private void testUriParsing(boolean keepOriginal, boolean removeIfSuccessful, String uri, Map<String, Object> expectedValues)
throws Exception {
UriPartsProcessor processor = new UriPartsProcessor(null, null, "field", "url", removeIfSuccessful, keepOriginal, false);
Map<String, Object> source = new HashMap<>();
source.put("field", uri);
IngestDocument input = TestIngestDocument.withDefaultVersion(source);
IngestDocument output = processor.execute(input);
Map<String, Object> expectedSourceAndMetadata = new HashMap<>();
if (removeIfSuccessful == false) {
expectedSourceAndMetadata.put("field", uri);
}
Map<String, Object> values;
if (keepOriginal) {
values = new HashMap<>(expectedValues);
values.put("original", uri);
} else {
values = expectedValues;
}
expectedSourceAndMetadata.put("url", values);
for (Map.Entry<String, Object> entry : expectedSourceAndMetadata.entrySet()) {
assertThat(output.getSourceAndMetadata(), hasEntry(entry.getKey(), entry.getValue()));
}
}
}
|
UriPartsProcessorTests
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/aop/framework/AbstractAopProxyTests.java
|
{
"start": 51096,
"end": 51961
}
|
class ____ implements MethodInterceptor {
@Override
public Object invoke(MethodInvocation mi) throws Throwable {
String task = "get invocation on way IN";
try {
MethodInvocation current = ExposeInvocationInterceptor.currentInvocation();
assertThat(current.getMethod()).isEqualTo(mi.getMethod());
Object retval = mi.proceed();
task = "get invocation on way OUT";
assertThat(ExposeInvocationInterceptor.currentInvocation()).isEqualTo(current);
return retval;
}
catch (IllegalStateException ex) {
System.err.println(task + " for " + mi.getMethod());
ex.printStackTrace();
throw ex;
}
}
}
/**
* Same thing for a proxy.
* Only works when exposeProxy is set to true.
* Checks that the proxy is the same on the way in and out.
*/
private static
|
CheckMethodInvocationViaThreadLocalIsSameInAndOutInterceptor
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/internal/operators/observable/ObservableFromCallableTest.java
|
{
"start": 1364,
"end": 9803
}
|
class ____ extends RxJavaTest {
@SuppressWarnings("unchecked")
@Test
public void shouldNotInvokeFuncUntilSubscription() throws Exception {
Callable<Object> func = mock(Callable.class);
when(func.call()).thenReturn(new Object());
Observable<Object> fromCallableObservable = Observable.fromCallable(func);
verifyNoInteractions(func);
fromCallableObservable.subscribe();
verify(func).call();
}
@SuppressWarnings("unchecked")
@Test
public void shouldCallOnNextAndOnCompleted() throws Exception {
Callable<String> func = mock(Callable.class);
when(func.call()).thenReturn("test_value");
Observable<String> fromCallableObservable = Observable.fromCallable(func);
Observer<Object> observer = TestHelper.mockObserver();
fromCallableObservable.subscribe(observer);
verify(observer).onNext("test_value");
verify(observer).onComplete();
verify(observer, never()).onError(any(Throwable.class));
}
@SuppressWarnings("unchecked")
@Test
public void shouldCallOnError() throws Exception {
Callable<Object> func = mock(Callable.class);
Throwable throwable = new IllegalStateException("Test exception");
when(func.call()).thenThrow(throwable);
Observable<Object> fromCallableObservable = Observable.fromCallable(func);
Observer<Object> observer = TestHelper.mockObserver();
fromCallableObservable.subscribe(observer);
verify(observer, never()).onNext(any());
verify(observer, never()).onComplete();
verify(observer).onError(throwable);
}
@SuppressWarnings("unchecked")
@Test
public void shouldNotDeliverResultIfSubscriberUnsubscribedBeforeEmission() throws Exception {
Callable<String> func = mock(Callable.class);
final CountDownLatch funcLatch = new CountDownLatch(1);
final CountDownLatch observerLatch = new CountDownLatch(1);
when(func.call()).thenAnswer(new Answer<String>() {
@Override
public String answer(InvocationOnMock invocation) throws Throwable {
observerLatch.countDown();
try {
funcLatch.await();
} catch (InterruptedException e) {
// It's okay, unsubscription causes Thread interruption
// Restoring interruption status of the Thread
Thread.currentThread().interrupt();
}
return "should_not_be_delivered";
}
});
Observable<String> fromCallableObservable = Observable.fromCallable(func);
Observer<Object> observer = TestHelper.mockObserver();
TestObserver<String> outer = new TestObserver<>(observer);
fromCallableObservable
.subscribeOn(Schedulers.computation())
.subscribe(outer);
// Wait until func will be invoked
observerLatch.await();
// Unsubscribing before emission
outer.dispose();
// Emitting result
funcLatch.countDown();
// func must be invoked
verify(func).call();
// Observer must not be notified at all
verify(observer).onSubscribe(any(Disposable.class));
verifyNoMoreInteractions(observer);
}
@Test
public void shouldAllowToThrowCheckedException() {
final Exception checkedException = new Exception("test exception");
Observable<Object> fromCallableObservable = Observable.fromCallable(new Callable<Object>() {
@Override
public Object call() throws Exception {
throw checkedException;
}
});
Observer<Object> observer = TestHelper.mockObserver();
fromCallableObservable.subscribe(observer);
verify(observer).onSubscribe(any(Disposable.class));
verify(observer).onError(checkedException);
verifyNoMoreInteractions(observer);
}
@Test
public void fusedFlatMapExecution() {
final int[] calls = { 0 };
Observable.just(1).flatMap(new Function<Integer, ObservableSource<? extends Object>>() {
@Override
public ObservableSource<? extends Object> apply(Integer v)
throws Exception {
return Observable.fromCallable(new Callable<Object>() {
@Override
public Object call() throws Exception {
return ++calls[0];
}
});
}
})
.test()
.assertResult(1);
assertEquals(1, calls[0]);
}
@Test
public void fusedFlatMapExecutionHidden() {
final int[] calls = { 0 };
Observable.just(1).hide().flatMap(new Function<Integer, ObservableSource<? extends Object>>() {
@Override
public ObservableSource<? extends Object> apply(Integer v)
throws Exception {
return Observable.fromCallable(new Callable<Object>() {
@Override
public Object call() throws Exception {
return ++calls[0];
}
});
}
})
.test()
.assertResult(1);
assertEquals(1, calls[0]);
}
@Test
public void fusedFlatMapNull() {
Observable.just(1).flatMap(new Function<Integer, ObservableSource<? extends Object>>() {
@Override
public ObservableSource<? extends Object> apply(Integer v)
throws Exception {
return Observable.fromCallable(new Callable<Object>() {
@Override
public Object call() throws Exception {
return null;
}
});
}
})
.test()
.assertFailure(NullPointerException.class);
}
@Test
public void fusedFlatMapNullHidden() {
Observable.just(1).hide().flatMap(new Function<Integer, ObservableSource<? extends Object>>() {
@Override
public ObservableSource<? extends Object> apply(Integer v)
throws Exception {
return Observable.fromCallable(new Callable<Object>() {
@Override
public Object call() throws Exception {
return null;
}
});
}
})
.test()
.assertFailure(NullPointerException.class);
}
@Test
public void disposedOnArrival() {
final int[] count = { 0 };
Observable.fromCallable(new Callable<Object>() {
@Override
public Object call() throws Exception {
count[0]++;
return 1;
}
})
.test(true)
.assertEmpty();
assertEquals(0, count[0]);
}
@Test
public void disposedOnCall() {
final TestObserver<Integer> to = new TestObserver<>();
Observable.fromCallable(new Callable<Integer>() {
@Override
public Integer call() throws Exception {
to.dispose();
return 1;
}
})
.subscribe(to);
to.assertEmpty();
}
@Test
public void disposedOnCallThrows() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
final TestObserver<Integer> to = new TestObserver<>();
Observable.fromCallable(new Callable<Integer>() {
@Override
public Integer call() throws Exception {
to.dispose();
throw new TestException();
}
})
.subscribe(to);
to.assertEmpty();
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void take() {
Observable.fromCallable(new Callable<Object>() {
@Override
public Object call() throws Exception {
return 1;
}
})
.take(1)
.test()
.assertResult(1);
}
}
|
ObservableFromCallableTest
|
java
|
spring-projects__spring-framework
|
spring-web/src/test/java/org/springframework/web/accept/HeaderApiVersionResolverTests.java
|
{
"start": 930,
"end": 1580
}
|
class ____ {
private final String headerName = "Api-Version";
private final HeaderApiVersionResolver resolver = new HeaderApiVersionResolver(headerName);
@Test
void resolve() {
String version = "1.2";
MockHttpServletRequest request = new MockHttpServletRequest("GET", "/path");
request.addHeader(headerName, version);
String actual = resolver.resolveVersion(request);
assertThat(actual).isEqualTo(version);
}
@Test
void noHeader() {
MockHttpServletRequest request = new MockHttpServletRequest("GET", "/path");
String version = resolver.resolveVersion(request);
assertThat(version).isNull();
}
}
|
HeaderApiVersionResolverTests
|
java
|
resilience4j__resilience4j
|
resilience4j-commons-configuration/src/main/java/io/github/resilience4j/commons/configuration/timelimiter/configure/CommonsConfigurationTimeLimiterRegistry.java
|
{
"start": 1200,
"end": 2317
}
|
class ____ {
private CommonsConfigurationTimeLimiterRegistry() {
}
/**
* Create a TimeLimiterRegistry from apache commons configuration instance
* @param configuration - apache commons configuration instance
* @param customizer - customizer for time limiter configuration
* @return a TimeLimiterRegistry with a Map of shared TimeLimiter configurations.
*/
public static TimeLimiterRegistry of(Configuration configuration, CompositeCustomizer<TimeLimiterConfigCustomizer> customizer){
CommonTimeLimiterConfigurationProperties timeLimiterProperties = CommonsConfigurationTimeLimiterConfiguration.of(configuration);
Map<String, TimeLimiterConfig> timeLimiterConfigMap = timeLimiterProperties.getInstances()
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> timeLimiterProperties.createTimeLimiterConfig(entry.getKey(), entry.getValue(), customizer)));
return TimeLimiterRegistry.of(timeLimiterConfigMap);
}
}
|
CommonsConfigurationTimeLimiterRegistry
|
java
|
apache__camel
|
components/camel-undertow/src/test/java/org/apache/camel/component/undertow/UndertowHeaderTest.java
|
{
"start": 1087,
"end": 3895
}
|
class ____ extends BaseUndertowTest {
@Test
public void testHttpHeaders() throws Exception {
getMockEndpoint("mock:input").expectedMessageCount(1);
getMockEndpoint("mock:input").expectedHeaderReceived("param", "true");
getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_METHOD, "GET");
getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_URL, "http://localhost:" + getPort() + "/headers");
getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_URI, "/headers");
getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_QUERY, "param=true");
getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_PATH, "");
String out = template.requestBody("http://localhost:" + getPort() + "/headers?param=true", null, String.class);
assertEquals("Bye World", out);
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testHttpHeadersPost() throws Exception {
getMockEndpoint("mock:input").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_METHOD, "POST");
getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_URL, "http://localhost:" + getPort() + "/headers");
getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_URI, "/headers");
getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_QUERY, "");
getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_PATH, "");
String out = template.requestBody("http://localhost:" + getPort() + "/headers", "Hello World", String.class);
assertEquals("Bye World", out);
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testHttpPathHeader() throws Exception {
getMockEndpoint("mock:input").expectedMessageCount(1);
getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_PATH, "/headers");
String out = template.requestBody("http://localhost:" + getPort() + "/hello/headers", null, String.class);
assertEquals("Hello World", out);
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("undertow:http://localhost:{{port}}/headers")
.to("mock:input")
.transform().constant("Bye World");
from("undertow:http://localhost:{{port}}/hello?matchOnUriPrefix=true")
.to("mock:input")
.transform().constant("Hello World");
}
};
}
}
|
UndertowHeaderTest
|
java
|
elastic__elasticsearch
|
client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java
|
{
"start": 14311,
"end": 14575
}
|
enum ____ {
HTTP("http"),
HTTPS("https");
private final String name;
Scheme(String name) {
this.name = name;
}
@Override
public String toString() {
return name;
}
}
}
|
Scheme
|
java
|
apache__camel
|
components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/model/simple/oneclassmandatory/Order.java
|
{
"start": 1115,
"end": 4132
}
|
class ____ {
@DataField(pos = 1)
private int orderNr;
@DataField(pos = 2, required = true)
private String clientNr;
@DataField(pos = 3, required = true)
private String firstName;
@DataField(pos = 4, required = true)
private String lastName;
@DataField(pos = 5)
private String instrumentCode;
@DataField(pos = 6)
private String instrumentNumber;
@DataField(pos = 7)
private String orderType;
@DataField(name = "Name", pos = 8)
private String instrumentType;
@DataField(pos = 9, precision = 2)
private BigDecimal amount;
@DataField(pos = 10)
private String currency;
@DataField(pos = 11, pattern = "dd-MM-yyyy")
private Date orderDate;
public int getOrderNr() {
return orderNr;
}
public void setOrderNr(int orderNr) {
this.orderNr = orderNr;
}
public String getClientNr() {
return clientNr;
}
public void setClientNr(String clientNr) {
this.clientNr = clientNr;
}
public String getFirstName() {
return firstName;
}
public void setFirstName(String firstName) {
this.firstName = firstName;
}
public String getLastName() {
return lastName;
}
public void setLastName(String lastName) {
this.lastName = lastName;
}
public String getInstrumentCode() {
return instrumentCode;
}
public void setInstrumentCode(String instrumentCode) {
this.instrumentCode = instrumentCode;
}
public String getInstrumentNumber() {
return instrumentNumber;
}
public void setInstrumentNumber(String instrumentNumber) {
this.instrumentNumber = instrumentNumber;
}
public String getOrderType() {
return orderType;
}
public void setOrderType(String orderType) {
this.orderType = orderType;
}
public String getInstrumentType() {
return instrumentType;
}
public void setInstrumentType(String instrumentType) {
this.instrumentType = instrumentType;
}
public BigDecimal getAmount() {
return amount;
}
public void setAmount(BigDecimal amount) {
this.amount = amount;
}
public String getCurrency() {
return currency;
}
public void setCurrency(String currency) {
this.currency = currency;
}
public Date getOrderDate() {
return orderDate;
}
public void setOrderDate(Date orderDate) {
this.orderDate = orderDate;
}
@Override
public String toString() {
return "Model : " + Order.class.getName() + " : " + this.orderNr + ", " + this.orderType + ", "
+ String.valueOf(this.amount) + ", " + this.instrumentCode + ", "
+ this.instrumentNumber + ", " + this.instrumentType + ", " + this.currency + ", " + this.clientNr + ", "
+ this.firstName + ", " + this.lastName + ", "
+ String.valueOf(this.orderDate);
}
}
|
Order
|
java
|
spring-projects__spring-security
|
access/src/main/java/org/springframework/security/access/intercept/aspectj/AspectJCallback.java
|
{
"start": 1124,
"end": 1187
}
|
class ____ uses Spring AOP annotations.
*/
@Deprecated
public
|
that
|
java
|
netty__netty
|
transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoopGroup.java
|
{
"start": 1653,
"end": 1774
}
|
class ____ extends MultiThreadIoEventLoopGroup {
// This does not use static by design to ensure the
|
EpollEventLoopGroup
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesBeanTests.java
|
{
"start": 18148,
"end": 18253
}
|
class ____<T> {
}
@Validated(BeanGroup.class)
@ConfigurationProperties
static
|
AnnotatedGenericComponent
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/nullness/ReturnMissingNullableTest.java
|
{
"start": 1027,
"end": 1350
}
|
class ____ {
@Test
public void literalNullReturn() {
createCompilationTestHelper()
.addSourceLines(
"com/google/errorprone/bugpatterns/nullness/LiteralNullReturnTest.java",
"""
package com.google.errorprone.bugpatterns.nullness;
public
|
ReturnMissingNullableTest
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/http/codec/json/Jackson2CodecSupport.java
|
{
"start": 1946,
"end": 2223
}
|
class ____ support methods for Jackson 2.x encoding and decoding.
*
* @author Sebastien Deleuze
* @author Rossen Stoyanchev
* @since 5.0
* @deprecated since 7.0 in favor of {@link JacksonCodecSupport}
*/
@Deprecated(since = "7.0", forRemoval = true)
public abstract
|
providing
|
java
|
square__moshi
|
moshi/src/test/java/com/squareup/moshi/internal/ClassJsonAdapterTest.java
|
{
"start": 1952,
"end": 2495
}
|
class ____ {
private String secretIngredient;
}
@Test
public void privateFields() throws Exception {
PrivateFieldsPizza value = new PrivateFieldsPizza();
value.secretIngredient = "vodka";
String toJson = toJson(PrivateFieldsPizza.class, value);
assertThat(toJson).isEqualTo("{\"secretIngredient\":\"vodka\"}");
PrivateFieldsPizza fromJson =
fromJson(PrivateFieldsPizza.class, "{\"secretIngredient\":\"vodka\"}");
assertThat(fromJson.secretIngredient).isEqualTo("vodka");
}
static
|
PrivateFieldsPizza
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/util/CollectionUtilsTests.java
|
{
"start": 11352,
"end": 11807
}
|
class ____ {
private final String name;
public Instance(String name) {
this.name = name;
}
@Override
public boolean equals(@Nullable Object rhs) {
if (this == rhs) {
return true;
}
if (rhs == null || getClass() != rhs.getClass()) {
return false;
}
Instance instance = (Instance) rhs;
return this.name.equals(instance.name);
}
@Override
public int hashCode() {
return this.name.hashCode();
}
}
}
|
Instance
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/DiscriminatorNotNullSingleTableTest.java
|
{
"start": 5795,
"end": 6249
}
|
class ____ extends Account {
private boolean active;
//Getters and setters are omitted for brevity
//end::entity-inheritance-single-table-discriminator-value-example[]
public boolean isActive() {
return active;
}
public void setActive(boolean active) {
this.active = active;
}
//tag::entity-inheritance-single-table-discriminator-value-example[]
}
//end::entity-inheritance-single-table-discriminator-value-example[]
}
|
OtherAccount
|
java
|
netty__netty
|
resolver-dns/src/main/java/io/netty/resolver/dns/DnsQueryContext.java
|
{
"start": 20335,
"end": 23412
}
|
class ____ implements AddressedEnvelope<DnsResponse, InetSocketAddress> {
private final InetSocketAddress sender;
private final InetSocketAddress recipient;
private final DnsResponse response;
AddressedEnvelopeAdapter(InetSocketAddress sender, InetSocketAddress recipient, DnsResponse response) {
this.sender = sender;
this.recipient = recipient;
this.response = response;
}
@Override
public DnsResponse content() {
return response;
}
@Override
public InetSocketAddress sender() {
return sender;
}
@Override
public InetSocketAddress recipient() {
return recipient;
}
@Override
public AddressedEnvelope<DnsResponse, InetSocketAddress> retain() {
response.retain();
return this;
}
@Override
public AddressedEnvelope<DnsResponse, InetSocketAddress> retain(int increment) {
response.retain(increment);
return this;
}
@Override
public AddressedEnvelope<DnsResponse, InetSocketAddress> touch() {
response.touch();
return this;
}
@Override
public AddressedEnvelope<DnsResponse, InetSocketAddress> touch(Object hint) {
response.touch(hint);
return this;
}
@Override
public int refCnt() {
return response.refCnt();
}
@Override
public boolean release() {
return response.release();
}
@Override
public boolean release(int decrement) {
return response.release(decrement);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof AddressedEnvelope)) {
return false;
}
@SuppressWarnings("unchecked")
final AddressedEnvelope<?, SocketAddress> that = (AddressedEnvelope<?, SocketAddress>) obj;
if (sender() == null) {
if (that.sender() != null) {
return false;
}
} else if (!sender().equals(that.sender())) {
return false;
}
if (recipient() == null) {
if (that.recipient() != null) {
return false;
}
} else if (!recipient().equals(that.recipient())) {
return false;
}
return response.equals(obj);
}
@Override
public int hashCode() {
int hashCode = response.hashCode();
if (sender() != null) {
hashCode = hashCode * 31 + sender().hashCode();
}
if (recipient() != null) {
hashCode = hashCode * 31 + recipient().hashCode();
}
return hashCode;
}
}
}
|
AddressedEnvelopeAdapter
|
java
|
spring-projects__spring-security
|
webauthn/src/test/java/org/springframework/security/web/webauthn/jackson/Jackson2Tests.java
|
{
"start": 2392,
"end": 14634
}
|
class ____ {
private ObjectMapper mapper;
@BeforeEach
void setup() {
this.mapper = new ObjectMapper();
this.mapper.registerModule(new WebauthnJackson2Module());
}
@Test
void readAuthenticatorTransport() throws Exception {
AuthenticatorTransport transport = this.mapper.readValue("\"hybrid\"", AuthenticatorTransport.class);
assertThat(transport).isEqualTo(AuthenticatorTransport.HYBRID);
}
@Test
void readAuthenticatorAttachment() throws Exception {
AuthenticatorAttachment value = this.mapper.readValue("\"cross-platform\"", AuthenticatorAttachment.class);
assertThat(value).isEqualTo(AuthenticatorAttachment.CROSS_PLATFORM);
}
@Test
void writeAuthenticatorAttachment() throws Exception {
String value = this.mapper.writeValueAsString(AuthenticatorAttachment.CROSS_PLATFORM);
assertThat(value).isEqualTo("\"cross-platform\"");
}
@Test
void readAuthenticationExtensionsClientOutputs() throws Exception {
String json = """
{
"credProps": {
"rk": false
}
}
""";
ImmutableAuthenticationExtensionsClientOutputs clientExtensionResults = new ImmutableAuthenticationExtensionsClientOutputs(
new CredentialPropertiesOutput(false));
AuthenticationExtensionsClientOutputs outputs = this.mapper.readValue(json,
AuthenticationExtensionsClientOutputs.class);
assertThat(outputs).usingRecursiveComparison().isEqualTo(clientExtensionResults);
}
@Test
void readAuthenticationExtensionsClientOutputsWhenAuthenticatorDisplayName() throws Exception {
String json = """
{
"credProps": {
"rk": false,
"authenticatorDisplayName": "1Password"
}
}
""";
ImmutableAuthenticationExtensionsClientOutputs clientExtensionResults = new ImmutableAuthenticationExtensionsClientOutputs(
new CredentialPropertiesOutput(false));
AuthenticationExtensionsClientOutputs outputs = this.mapper.readValue(json,
AuthenticationExtensionsClientOutputs.class);
assertThat(outputs).usingRecursiveComparison().isEqualTo(clientExtensionResults);
}
@Test
void readCredPropsWhenAuthenticatorDisplayName() throws Exception {
String json = """
{
"rk": false,
"authenticatorDisplayName": "1Password"
}
""";
CredentialPropertiesOutput credProps = new CredentialPropertiesOutput(false);
CredentialPropertiesOutput outputs = this.mapper.readValue(json, CredentialPropertiesOutput.class);
assertThat(outputs).usingRecursiveComparison().isEqualTo(credProps);
}
@Test
void readAuthenticationExtensionsClientOutputsWhenFieldAfter() throws Exception {
String json = """
{
"clientOutputs": {
"credProps": {
"rk": false
}
},
"label": "Cell Phone"
}
""";
ImmutableAuthenticationExtensionsClientOutputs clientExtensionResults = new ImmutableAuthenticationExtensionsClientOutputs(
new CredentialPropertiesOutput(false));
ClassWithOutputsAndAnotherField expected = new ClassWithOutputsAndAnotherField();
expected.setClientOutputs(clientExtensionResults);
expected.setLabel("Cell Phone");
ClassWithOutputsAndAnotherField actual = this.mapper.readValue(json, ClassWithOutputsAndAnotherField.class);
assertThat(actual).usingRecursiveComparison().isEqualTo(expected);
}
@Test
void writePublicKeyCredentialCreationOptions() throws Exception {
String expected = """
{
"attestation": "none",
"authenticatorSelection": {
"residentKey": "required"
},
"challenge": "q7lCdd3SVQxdC-v8pnRAGEn1B2M-t7ZECWPwCAmhWvc",
"excludeCredentials": [],
"extensions": {
"credProps": true
},
"pubKeyCredParams": [
{
"alg": -7,
"type": "public-key"
},{
"alg": -8,
"type": "public-key"
},
{
"alg": -257,
"type": "public-key"
}
],
"rp": {
"id": "example.localhost",
"name": "SimpleWebAuthn Example"
},
"timeout": 300000,
"user": {
"displayName": "user@example.localhost",
"id": "oWJtkJ6vJ_m5b84LB4_K7QKTCTEwLIjCh4tFMCGHO4w",
"name": "user@example.localhost"
}
}
""";
PublicKeyCredentialCreationOptions options = TestPublicKeyCredentialCreationOptions
.createPublicKeyCredentialCreationOptions()
.build();
String string = this.mapper.writeValueAsString(options);
JSONAssert.assertEquals(expected, string, false);
}
@Test
void readPublicKeyCredentialAuthenticatorAttestationResponse() throws Exception {
PublicKeyCredential<AuthenticatorAttestationResponse> publicKeyCredential = this.mapper.readValue(
PublicKeyCredentialJson.PUBLIC_KEY_JSON,
new TypeReference<PublicKeyCredential<AuthenticatorAttestationResponse>>() {
});
ImmutableAuthenticationExtensionsClientOutputs clientExtensionResults = new ImmutableAuthenticationExtensionsClientOutputs(
new CredentialPropertiesOutput(false));
PublicKeyCredential<AuthenticatorAttestationResponse> expected = PublicKeyCredential.builder()
.id("AX6nVVERrH6opMafUGn3Z9EyNEy6cftfBKV_2YxYl1jdW8CSJxMKGXFV3bnrKTiMSJeInkG7C6B2lPt8E5i3KaM")
.rawId(Bytes
.fromBase64("AX6nVVERrH6opMafUGn3Z9EyNEy6cftfBKV_2YxYl1jdW8CSJxMKGXFV3bnrKTiMSJeInkG7C6B2lPt8E5i3KaM"))
.response(AuthenticatorAttestationResponse.builder()
.attestationObject(Bytes.fromBase64(
"o2NmbXRkbm9uZWdhdHRTdG10oGhhdXRoRGF0YVjFSZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2NFAAAAAAAAAAAAAAAAAAAAAAAAAAAAQQF-p1VREax-qKTGn1Bp92fRMjRMunH7XwSlf9mMWJdY3VvAkicTChlxVd256yk4jEiXiJ5BuwugdpT7fBOYtymjpQECAyYgASFYIJK-2epPEw0ujHN-gvVp2Hp3ef8CzU3zqwO5ylx8L2OsIlggK5x5OlTGEPxLS-85TAABum4aqVK4CSWJ7LYDdkjuBLk"))
.clientDataJSON(Bytes.fromBase64(
"eyJ0eXBlIjoid2ViYXV0aG4uY3JlYXRlIiwiY2hhbGxlbmdlIjoiSUJRbnVZMVowSzFIcUJvRldDcDJ4bEpsOC1vcV9hRklYenlUX0YwLTBHVSIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6ODA4MCIsImNyb3NzT3JpZ2luIjpmYWxzZX0"))
.transports(AuthenticatorTransport.HYBRID, AuthenticatorTransport.INTERNAL)
.build())
.type(PublicKeyCredentialType.PUBLIC_KEY)
.clientExtensionResults(clientExtensionResults)
.authenticatorAttachment(AuthenticatorAttachment.CROSS_PLATFORM)
.build();
assertThat(publicKeyCredential).usingRecursiveComparison().isEqualTo(expected);
}
@Test
void readPublicKeyCredentialAuthenticatorAttestationResponseWhenExtraFields() throws Exception {
final String json = """
{
"attestationObject": "o2NmbXRkbm9uZWdhdHRTdG10oGhhdXRoRGF0YVjFSZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2NFAAAAAAAAAAAAAAAAAAAAAAAAAAAAQQF-p1VREax-qKTGn1Bp92fRMjRMunH7XwSlf9mMWJdY3VvAkicTChlxVd256yk4jEiXiJ5BuwugdpT7fBOYtymjpQECAyYgASFYIJK-2epPEw0ujHN-gvVp2Hp3ef8CzU3zqwO5ylx8L2OsIlggK5x5OlTGEPxLS-85TAABum4aqVK4CSWJ7LYDdkjuBLk",
"clientDataJSON": "eyJ0eXBlIjoid2ViYXV0aG4uY3JlYXRlIiwiY2hhbGxlbmdlIjoiSUJRbnVZMVowSzFIcUJvRldDcDJ4bEpsOC1vcV9hRklYenlUX0YwLTBHVSIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6ODA4MCIsImNyb3NzT3JpZ2luIjpmYWxzZX0",
"transports": [
"hybrid",
"internal"
],
"publicKeyAlgorithm": -7,
"publicKey": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEkr7Z6k8TDS6Mc36C9WnYend5_wLNTfOrA7nKXHwvY6wrnHk6VMYQ_EtL7zlMAAG6bhqpUrgJJYnstgN2SO4EuQ",
"authenticatorData": "SZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2NFAAAAAAAAAAAAAAAAAAAAAAAAAAAAQQF-p1VREax-qKTGn1Bp92fRMjRMunH7XwSlf9mMWJdY3VvAkicTChlxVd256yk4jEiXiJ5BuwugdpT7fBOYtymjpQECAyYgASFYIJK-2epPEw0ujHN-gvVp2Hp3ef8CzU3zqwO5ylx8L2OsIlggK5x5OlTGEPxLS-85TAABum4aqVK4CSWJ7LYDdkjuBLk"
}
""";
AuthenticatorAttestationResponse response = this.mapper.readValue(json, AuthenticatorAttestationResponse.class);
ImmutableAuthenticationExtensionsClientOutputs clientExtensionResults = new ImmutableAuthenticationExtensionsClientOutputs(
new CredentialPropertiesOutput(false));
AuthenticatorAttestationResponse expected = AuthenticatorAttestationResponse.builder()
.attestationObject(Bytes.fromBase64(
"o2NmbXRkbm9uZWdhdHRTdG10oGhhdXRoRGF0YVjFSZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2NFAAAAAAAAAAAAAAAAAAAAAAAAAAAAQQF-p1VREax-qKTGn1Bp92fRMjRMunH7XwSlf9mMWJdY3VvAkicTChlxVd256yk4jEiXiJ5BuwugdpT7fBOYtymjpQECAyYgASFYIJK-2epPEw0ujHN-gvVp2Hp3ef8CzU3zqwO5ylx8L2OsIlggK5x5OlTGEPxLS-85TAABum4aqVK4CSWJ7LYDdkjuBLk"))
.clientDataJSON(Bytes.fromBase64(
"eyJ0eXBlIjoid2ViYXV0aG4uY3JlYXRlIiwiY2hhbGxlbmdlIjoiSUJRbnVZMVowSzFIcUJvRldDcDJ4bEpsOC1vcV9hRklYenlUX0YwLTBHVSIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6ODA4MCIsImNyb3NzT3JpZ2luIjpmYWxzZX0"))
.transports(AuthenticatorTransport.HYBRID, AuthenticatorTransport.INTERNAL)
.build();
assertThat(response).usingRecursiveComparison().isEqualTo(expected);
}
@Test
void writeAuthenticationOptions() throws Exception {
PublicKeyCredentialRequestOptions credentialRequestOptions = PublicKeyCredentialRequestOptions.builder()
.allowCredentials(Arrays.asList())
.challenge(Bytes.fromBase64("I69THX904Q8ONhCgUgOu2PCQCcEjTDiNmokdbgsAsYU"))
.rpId("example.localhost")
.timeout(Duration.ofMinutes(5))
.userVerification(UserVerificationRequirement.REQUIRED)
.build();
String actual = this.mapper.writeValueAsString(credentialRequestOptions);
String expected = """
{
"challenge": "I69THX904Q8ONhCgUgOu2PCQCcEjTDiNmokdbgsAsYU",
"allowCredentials": [],
"timeout": 300000,
"userVerification": "required",
"rpId": "example.localhost"
}
""";
JSONAssert.assertEquals(expected, actual, false);
}
@Test
void readPublicKeyCredentialAuthenticatorAssertionResponse() throws Exception {
    // JSON as produced by a browser's navigator.credentials.get() response,
    // including an empty clientExtensionResults object and a cross-platform
    // authenticator attachment.
    String input = """
            {
                "id": "IquGb208Fffq2cROa1ZxMg",
                "rawId": "IquGb208Fffq2cROa1ZxMg",
                "response": {
                    "authenticatorData": "SZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2MdAAAAAA",
                    "clientDataJSON": "eyJ0eXBlIjoid2ViYXV0aG4uZ2V0IiwiY2hhbGxlbmdlIjoiaDB2Z3dHUWpvQ3pBekRVc216UHBrLUpWSUpSUmduMEw0S1ZTWU5SY0VaYyIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6ODA4MCIsImNyb3NzT3JpZ2luIjpmYWxzZX0",
                    "signature": "MEUCIAdfzPAn3voyXynwa0IXk1S0envMY5KP3NEe9aj4B2BuAiEAm_KJhQoWXdvfhbzwACU3NM4ltQe7_Il46qFUwtpuTdg",
                    "userHandle": "oWJtkJ6vJ_m5b84LB4_K7QKTCTEwLIjCh4tFMCGHO4w"
                },
                "type": "public-key",
                "clientExtensionResults": {},
                "authenticatorAttachment": "cross-platform"
            }
            """;
    // TypeReference carries the generic AuthenticatorAssertionResponse parameter
    // through deserialization.
    PublicKeyCredential<AuthenticatorAssertionResponse> parsed = this.mapper.readValue(input,
            new TypeReference<PublicKeyCredential<AuthenticatorAssertionResponse>>() {
            });
    PublicKeyCredential<AuthenticatorAssertionResponse> expected = PublicKeyCredential.builder()
        .id("IquGb208Fffq2cROa1ZxMg")
        .rawId(Bytes.fromBase64("IquGb208Fffq2cROa1ZxMg"))
        .response(AuthenticatorAssertionResponse.builder()
            .authenticatorData(Bytes.fromBase64("SZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2MdAAAAAA"))
            .clientDataJSON(Bytes.fromBase64(
                    "eyJ0eXBlIjoid2ViYXV0aG4uZ2V0IiwiY2hhbGxlbmdlIjoiaDB2Z3dHUWpvQ3pBekRVc216UHBrLUpWSUpSUmduMEw0S1ZTWU5SY0VaYyIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6ODA4MCIsImNyb3NzT3JpZ2luIjpmYWxzZX0"))
            .signature(Bytes.fromBase64(
                    "MEUCIAdfzPAn3voyXynwa0IXk1S0envMY5KP3NEe9aj4B2BuAiEAm_KJhQoWXdvfhbzwACU3NM4ltQe7_Il46qFUwtpuTdg"))
            .userHandle(Bytes.fromBase64("oWJtkJ6vJ_m5b84LB4_K7QKTCTEwLIjCh4tFMCGHO4w"))
            .build())
        .type(PublicKeyCredentialType.PUBLIC_KEY)
        // Empty {} in the JSON maps to an outputs instance with no entries.
        .clientExtensionResults(new ImmutableAuthenticationExtensionsClientOutputs())
        .authenticatorAttachment(AuthenticatorAttachment.CROSS_PLATFORM)
        .build();
    assertThat(parsed).usingRecursiveComparison().isEqualTo(expected);
}
@Test
void writeAuthenticationExtensionsClientInputsWhenCredPropsTrue() throws Exception {
    // A credProps extension input must serialize as the boolean flag
    // {"credProps": true}, not as a nested object.
    ImmutableAuthenticationExtensionsClientInputs inputs = new ImmutableAuthenticationExtensionsClientInputs(
            ImmutableAuthenticationExtensionsClientInput.credProps);
    String serialized = this.mapper.writeValueAsString(inputs);
    String expectedJson = """
            {
                "credProps": true
            }
            """;
    // Lenient comparison: only the listed field is checked.
    JSONAssert.assertEquals(expectedJson, serialized, false);
}
public static
|
Jackson2Tests
|
java
|
quarkusio__quarkus
|
integration-tests/gradle/src/main/resources/spring-dependency-plugin-project/src/main/java/org/acme/ExampleResource.java
|
{
"start": 164,
"end": 289
}
|
// NOTE(review): this class name is a masked placeholder from the surrounding
// dataset dump; the original row names it ExampleResource. JAX-RS resource stub.
class ____ {
// Handles HTTP GET and declares a plain-text response.
@GET
@Produces(MediaType.TEXT_PLAIN)
public String hello() {
// Placeholder implementation: always returns the empty string.
return "";
}
}
|
ExampleResource
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.