language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/operators/sink/InitContextInitializationContextAdapter.java | {
"start": 1236,
"end": 2094
} | class ____ implements SerializationSchema.InitializationContext {
private final UserCodeClassLoader userCodeClassLoader;
private final Supplier<MetricGroup> metricGroupSupplier;
private MetricGroup cachedMetricGroup;
public InitContextInitializationContextAdapter(
UserCodeClassLoader userCodeClassLoader, Supplier<MetricGroup> metricGroupSupplier) {
this.userCodeClassLoader = userCodeClassLoader;
this.metricGroupSupplier = metricGroupSupplier;
}
@Override
public MetricGroup getMetricGroup() {
if (cachedMetricGroup == null) {
cachedMetricGroup = metricGroupSupplier.get();
}
return cachedMetricGroup;
}
@Override
public UserCodeClassLoader getUserCodeClassLoader() {
return userCodeClassLoader;
}
}
| InitContextInitializationContextAdapter |
java | apache__camel | components/camel-cbor/src/test/java/org/apache/camel/component/cbor/Author.java | {
"start": 852,
"end": 1202
} | class ____ {
private String name;
private String surname;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getSurname() {
return surname;
}
public void setSurname(String surname) {
this.surname = surname;
}
}
| Author |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/JoinedInheritanceCollectionSameHierarchyTest.java | {
"start": 3204,
"end": 3510
} | class ____ extends SuperEntity {
@ManyToOne( fetch = FetchType.LAZY )
UserEntity employee;
public AbstractCompany() {
}
public AbstractCompany(Integer id) {
super( id );
}
public UserEntity getEmployee() {
return employee;
}
}
@Entity( name = "GoodCompany" )
static | AbstractCompany |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/cache/annotation/ReactiveCachingTests.java | {
"start": 13867,
"end": 14074
} | class ____ {
@Bean
CacheManager cacheManager() {
return new ConcurrentMapCacheManager("first");
}
}
@Configuration(proxyBeanMethods = false)
@EnableCaching
static | EarlyCacheHitDeterminationConfig |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredErrorTests.java | {
"start": 928,
"end": 2651
} | class ____ extends AbstractXContentSerializingTestCase<MeanSquaredError> {
@Override
protected MeanSquaredError doParseInstance(XContentParser parser) throws IOException {
return MeanSquaredError.fromXContent(parser);
}
@Override
protected MeanSquaredError createTestInstance() {
return createRandom();
}
@Override
protected MeanSquaredError mutateInstance(MeanSquaredError instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Writeable.Reader<MeanSquaredError> instanceReader() {
return MeanSquaredError::new;
}
public static MeanSquaredError createRandom() {
return new MeanSquaredError();
}
public void testEvaluate() {
InternalAggregations aggs = InternalAggregations.from(
Arrays.asList(mockSingleValue("regression_mse", 0.8123), mockSingleValue("some_other_single_metric_agg", 0.2377))
);
MeanSquaredError mse = new MeanSquaredError();
mse.process(aggs);
EvaluationMetricResult result = mse.getResult().get();
String expected = "{\"value\":0.8123}";
assertThat(Strings.toString(result), equalTo(expected));
}
public void testEvaluate_GivenMissingAggs() {
InternalAggregations aggs = InternalAggregations.from(
Collections.singletonList(mockSingleValue("some_other_single_metric_agg", 0.2377))
);
MeanSquaredError mse = new MeanSquaredError();
mse.process(aggs);
EvaluationMetricResult result = mse.getResult().get();
assertThat(result, equalTo(new MeanSquaredError.Result(0.0)));
}
}
| MeanSquaredErrorTests |
java | apache__kafka | connect/api/src/main/java/org/apache/kafka/connect/errors/IllegalWorkerStateException.java | {
"start": 956,
"end": 1297
} | class ____ extends ConnectException {
public IllegalWorkerStateException(String s) {
super(s);
}
public IllegalWorkerStateException(String s, Throwable throwable) {
super(s, throwable);
}
public IllegalWorkerStateException(Throwable throwable) {
super(throwable);
}
}
| IllegalWorkerStateException |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimitsTests.java | {
"start": 2612,
"end": 12749
} | class ____<PlanType extends LogicalPlan> {
private final Class<PlanType> clazz;
private final BiFunction<LogicalPlan, Attribute, PlanType> planBuilder;
private final BiConsumer<PlanType, PlanType> planChecker;
PushDownLimitTestCase(
Class<PlanType> clazz,
BiFunction<LogicalPlan, Attribute, PlanType> planBuilder,
BiConsumer<PlanType, PlanType> planChecker
) {
this.clazz = clazz;
this.planBuilder = planBuilder;
this.planChecker = planChecker;
}
public PlanType buildPlan(LogicalPlan child, Attribute attr) {
return planBuilder.apply(child, attr);
}
public void checkOptimizedPlan(LogicalPlan basePlan, LogicalPlan optimizedPlan) {
planChecker.accept(as(basePlan, clazz), as(optimizedPlan, clazz));
}
}
private static final List<PushDownLimitTestCase<? extends UnaryPlan>> PUSHABLE_LIMIT_TEST_CASES = List.of(
new PushDownLimitTestCase<>(
Eval.class,
(plan, attr) -> new Eval(EMPTY, plan, List.of(new Alias(EMPTY, "y", new ToInteger(EMPTY, attr)))),
(basePlan, optimizedPlan) -> {
assertEquals(basePlan.source(), optimizedPlan.source());
assertEquals(basePlan.fields(), optimizedPlan.fields());
}
),
new PushDownLimitTestCase<>(
Completion.class,
(plan, attr) -> new Completion(EMPTY, plan, randomLiteral(KEYWORD), randomLiteral(KEYWORD), attr),
(basePlan, optimizedPlan) -> {
assertEquals(basePlan.source(), optimizedPlan.source());
assertEquals(basePlan.inferenceId(), optimizedPlan.inferenceId());
assertEquals(basePlan.prompt(), optimizedPlan.prompt());
assertEquals(basePlan.targetField(), optimizedPlan.targetField());
}
),
new PushDownLimitTestCase<>(
Rerank.class,
(plan, attr) -> new Rerank(
EMPTY,
plan,
randomLiteral(KEYWORD),
randomLiteral(KEYWORD),
randomList(1, 10, () -> new Alias(EMPTY, randomIdentifier(), randomLiteral(KEYWORD))),
attr
),
(basePlan, optimizedPlan) -> {
assertEquals(basePlan.source(), optimizedPlan.source());
assertEquals(basePlan.inferenceId(), optimizedPlan.inferenceId());
assertEquals(basePlan.queryText(), optimizedPlan.queryText());
assertEquals(basePlan.rerankFields(), optimizedPlan.rerankFields());
assertEquals(basePlan.scoreAttribute(), optimizedPlan.scoreAttribute());
}
),
new PushDownLimitTestCase<>(
Enrich.class,
(plan, attr) -> new Enrich(
EMPTY,
plan,
randomFrom(Enrich.Mode.ANY, Enrich.Mode.COORDINATOR),
randomLiteral(KEYWORD),
attr,
null,
Map.of(),
List.of()
),
(basePlan, optimizedPlan) -> {
assertEquals(basePlan.source(), optimizedPlan.source());
assertEquals(basePlan.mode(), optimizedPlan.mode());
assertEquals(basePlan.policyName(), optimizedPlan.policyName());
assertEquals(basePlan.matchField(), optimizedPlan.matchField());
}
)
);
private static final List<PushDownLimitTestCase<? extends UnaryPlan>> NON_PUSHABLE_LIMIT_TEST_CASES = List.of(
new PushDownLimitTestCase<>(
Filter.class,
(plan, attr) -> new Filter(EMPTY, plan, new Equals(EMPTY, attr, new Literal(EMPTY, BytesRefs.toBytesRef("right"), TEXT))),
(basePlan, optimizedPlan) -> {
assertEquals(basePlan.source(), optimizedPlan.source());
assertEquals(basePlan.condition(), optimizedPlan.condition());
}
),
new PushDownLimitTestCase<>(
OrderBy.class,
(plan, attr) -> new OrderBy(EMPTY, plan, List.of(new Order(EMPTY, attr, Order.OrderDirection.DESC, null))),
(basePlan, optimizedPlan) -> {
assertEquals(basePlan.source(), optimizedPlan.source());
assertEquals(basePlan.order(), optimizedPlan.order());
}
)
);
public void testPushableLimit() {
FieldAttribute a = getFieldAttribute("a");
FieldAttribute b = getFieldAttribute("b");
EsRelation relation = relation().withAttributes(List.of(a, b));
for (PushDownLimitTestCase<? extends UnaryPlan> pushableLimitTestCase : PUSHABLE_LIMIT_TEST_CASES) {
int precedingLimitValue = randomIntBetween(1, 10_000);
Limit precedingLimit = new Limit(EMPTY, new Literal(EMPTY, precedingLimitValue, INTEGER), relation);
LogicalPlan pushableLimitTestPlan = pushableLimitTestCase.buildPlan(precedingLimit, a);
int pushableLimitValue = randomIntBetween(1, 10_000);
Limit pushableLimit = new Limit(EMPTY, new Literal(EMPTY, pushableLimitValue, INTEGER), pushableLimitTestPlan);
LogicalPlan optimizedPlan = optimizePlan(pushableLimit);
pushableLimitTestCase.checkOptimizedPlan(pushableLimitTestPlan, optimizedPlan);
assertEquals(
as(optimizedPlan, UnaryPlan.class).child(),
new Limit(EMPTY, new Literal(EMPTY, Math.min(pushableLimitValue, precedingLimitValue), INTEGER), relation)
);
}
}
public void testNonPushableLimit() {
FieldAttribute a = getFieldAttribute("a");
FieldAttribute b = getFieldAttribute("b");
EsRelation relation = relation().withAttributes(List.of(a, b));
for (PushDownLimitTestCase<? extends UnaryPlan> nonPushableLimitTestCase : NON_PUSHABLE_LIMIT_TEST_CASES) {
int precedingLimitValue = randomIntBetween(1, 10_000);
Limit precedingLimit = new Limit(EMPTY, new Literal(EMPTY, precedingLimitValue, INTEGER), relation);
UnaryPlan nonPushableLimitTestPlan = nonPushableLimitTestCase.buildPlan(precedingLimit, a);
int nonPushableLimitValue = randomIntBetween(1, 10_000);
Limit nonPushableLimit = new Limit(EMPTY, new Literal(EMPTY, nonPushableLimitValue, INTEGER), nonPushableLimitTestPlan);
Limit optimizedPlan = as(optimizePlan(nonPushableLimit), Limit.class);
nonPushableLimitTestCase.checkOptimizedPlan(nonPushableLimitTestPlan, optimizedPlan.child());
assertEquals(
optimizedPlan,
new Limit(
EMPTY,
new Literal(EMPTY, Math.min(nonPushableLimitValue, precedingLimitValue), INTEGER),
nonPushableLimitTestPlan
)
);
assertEquals(as(optimizedPlan.child(), UnaryPlan.class).child(), nonPushableLimitTestPlan.child());
}
}
private static final List<PushDownLimitTestCase<? extends LogicalPlan>> DUPLICATING_TEST_CASES = List.of(
new PushDownLimitTestCase<>(
Enrich.class,
(plan, attr) -> new Enrich(EMPTY, plan, Enrich.Mode.REMOTE, randomLiteral(KEYWORD), attr, null, Map.of(), List.of()),
(basePlan, optimizedPlan) -> {
assertEquals(basePlan.source(), optimizedPlan.source());
assertEquals(basePlan.mode(), optimizedPlan.mode());
assertEquals(basePlan.policyName(), optimizedPlan.policyName());
assertEquals(basePlan.matchField(), optimizedPlan.matchField());
var limit = as(optimizedPlan.child(), Limit.class);
assertTrue(limit.local());
assertFalse(limit.duplicated());
}
),
new PushDownLimitTestCase<>(MvExpand.class, (plan, attr) -> new MvExpand(EMPTY, plan, attr, attr), (basePlan, optimizedPlan) -> {
assertEquals(basePlan.source(), optimizedPlan.source());
assertEquals(basePlan.expanded(), optimizedPlan.expanded());
var limit = as(optimizedPlan.child(), Limit.class);
assertFalse(limit.local());
assertFalse(limit.duplicated());
}),
new PushDownLimitTestCase<>(
Join.class,
(plan, attr) -> new Join(EMPTY, plan, plan, new JoinConfig(JoinTypes.LEFT, List.of(), List.of(), attr)),
(basePlan, optimizedPlan) -> {
assertEquals(basePlan.source(), optimizedPlan.source());
var limit = as(optimizedPlan.left(), Limit.class);
assertFalse(limit.local());
assertFalse(limit.duplicated());
}
)
);
public void testPushableLimitDuplicate() {
FieldAttribute a = getFieldAttribute("a");
FieldAttribute b = getFieldAttribute("b");
EsRelation relation = relation().withAttributes(List.of(a, b));
for (PushDownLimitTestCase<? extends LogicalPlan> duplicatingTestCase : DUPLICATING_TEST_CASES) {
int precedingLimitValue = randomIntBetween(1, 10_000);
Limit precedingLimit = new Limit(EMPTY, new Literal(EMPTY, precedingLimitValue, INTEGER), relation);
LogicalPlan duplicatingLimitTestPlan = duplicatingTestCase.buildPlan(precedingLimit, a);
int upperLimitValue = randomIntBetween(1, precedingLimitValue);
Limit upperLimit = new Limit(EMPTY, new Literal(EMPTY, upperLimitValue, INTEGER), duplicatingLimitTestPlan);
Limit optimizedPlan = as(optimizePlan(upperLimit), Limit.class);
duplicatingTestCase.checkOptimizedPlan(duplicatingLimitTestPlan, optimizedPlan.child());
assertTrue(optimizedPlan.duplicated());
assertFalse(optimizedPlan.local());
}
}
private LogicalPlan optimizePlan(LogicalPlan plan) {
return new PushDownAndCombineLimits().apply(plan, unboundLogicalOptimizerContext());
}
}
| PushDownLimitTestCase |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/support/ContextLoaderUtilsContextHierarchyTests.java | {
"start": 24583,
"end": 24715
} | interface ____ {
}
@ContextHierarchy(@ContextConfiguration("C.xml"))
@Retention(RetentionPolicy.RUNTIME)
private @ | ContextHierarchyB |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/matchers/EndsWith.java | {
"start": 234,
"end": 632
} | class ____ implements ArgumentMatcher<String>, Serializable {
private final String suffix;
public EndsWith(String suffix) {
this.suffix = suffix;
}
@Override
public boolean matches(String actual) {
return actual != null && actual.endsWith(suffix);
}
@Override
public String toString() {
return "endsWith(\"" + suffix + "\")";
}
}
| EndsWith |
java | apache__camel | core/camel-core-reifier/src/main/java/org/apache/camel/reifier/language/XQueryExpressionReifier.java | {
"start": 1164,
"end": 2478
} | class ____ extends SingleInputTypedExpressionReifier<XQueryExpression> {
public XQueryExpressionReifier(CamelContext camelContext, ExpressionDefinition definition) {
super(camelContext, definition);
}
@Override
protected void configurePredicate(Predicate predicate) {
configureNamespaceAware(predicate);
}
@Override
protected void configureExpression(Expression expression) {
configureNamespaceAware(expression);
}
protected void configureNamespaceAware(Object builder) {
if (definition.getNamespaces() != null && builder instanceof NamespaceAware namespaceAware) {
namespaceAware.setNamespaces(definition.getNamespaces());
}
}
protected Object[] createProperties() {
Object[] properties = new Object[3];
properties[0] = asResultType();
properties[1] = parseString(definition.getSource());
properties[2] = definition.getNamespaces();
return properties;
}
@Override
protected void configureLanguage(Language language) {
if (definition.getConfiguration() == null && definition.getConfigurationRef() != null) {
definition.setConfiguration(mandatoryLookup(definition.getConfigurationRef(), Object.class));
}
}
}
| XQueryExpressionReifier |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/model/Javadoc.java | {
"start": 521,
"end": 577
} | class ____ extends ModelElement {
public static | Javadoc |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java | {
"start": 3727,
"end": 5219
} | class ____ {
/** Bases */
static final int[] P = {2, 3};
/** Maximum number of digits allowed */
static final int[] K = {63, 40};
private long index;
private double[] x;
private double[][] q;
private int[][] d;
/** Initialize to H(startindex),
* so the sequence begins with H(startindex+1).
*/
HaltonSequence(long startindex) {
index = startindex;
x = new double[K.length];
q = new double[K.length][];
d = new int[K.length][];
for(int i = 0; i < K.length; i++) {
q[i] = new double[K[i]];
d[i] = new int[K[i]];
}
for(int i = 0; i < K.length; i++) {
long k = index;
x[i] = 0;
for(int j = 0; j < K[i]; j++) {
q[i][j] = (j == 0? 1.0: q[i][j-1])/P[i];
d[i][j] = (int)(k % P[i]);
k = (k - d[i][j])/P[i];
x[i] += d[i][j] * q[i][j];
}
}
}
/** Compute next point.
* Assume the current point is H(index).
* Compute H(index+1).
*
* @return a 2-dimensional point with coordinates in [0,1)^2
*/
double[] nextPoint() {
index++;
for(int i = 0; i < K.length; i++) {
for(int j = 0; j < K[i]; j++) {
d[i][j]++;
x[i] += q[i][j];
if (d[i][j] < P[i]) {
break;
}
d[i][j] = 0;
x[i] -= (j == 0? 1.0: q[i][j-1]);
}
}
return x;
}
}
/**
* Mapper | HaltonSequence |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java | {
"start": 21119,
"end": 21414
} | class ____ by the given configuration key. The configuration
* may specify either null or an empty string to indicate no configured instances. In both cases, this method
* returns an empty list to indicate no configured instances.
*
* @param classNames The list of | specified |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/ConfigurerResolverTest.java | {
"start": 1057,
"end": 1519
} | class ____ extends ContextTestSupport {
@Test
public void testConfigurerResolver() {
PropertyConfigurer resolver = PluginHelper.getConfigurerResolver(context)
.resolvePropertyConfigurer(context.getClass().getName(), context);
Assertions.assertNotNull(resolver);
resolver.configure(context, context, "name", "foobar", true);
Assertions.assertEquals("foobar", context.getName());
}
}
| ConfigurerResolverTest |
java | apache__flink | flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/src/impl/DynamicFileSplitEnumeratorTest.java | {
"start": 6725,
"end": 6889
} | class ____ extends FileSourceSplit {
public TestSplit(String id) {
super(id, new Path(), 0, 0, 0L, 0);
}
}
private static | TestSplit |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java | {
"start": 243894,
"end": 262529
} | class ____ to override the standard
// behavior for special kinds of expressions.
RexNode rex = convertExtendedExpression(expr, this);
if (rex != null) {
return rex;
}
// Sub-queries and OVER expressions are not like ordinary
// expressions.
final SqlKind kind = expr.getKind();
final SubQuery subQuery;
if (!config.isExpand()) {
final SqlCall call;
final SqlNode query;
final RelRoot root;
switch (kind) {
case IN:
case NOT_IN:
case SOME:
case ALL:
call = (SqlCall) expr;
query = call.operand(1);
if (!(query instanceof SqlNodeList)) {
root = convertQueryRecursive(query, false, null);
final SqlNode operand = call.operand(0);
List<SqlNode> nodes;
switch (operand.getKind()) {
case ROW:
nodes = ((SqlCall) operand).getOperandList();
break;
default:
nodes = ImmutableList.of(operand);
}
final ImmutableList.Builder<RexNode> builder = ImmutableList.builder();
for (SqlNode node : nodes) {
builder.add(convertExpression(node));
}
final ImmutableList<RexNode> list = builder.build();
switch (kind) {
case IN:
return RexSubQuery.in(root.rel, list);
case NOT_IN:
return rexBuilder.makeCall(
SqlStdOperatorTable.NOT,
RexSubQuery.in(root.rel, list));
case SOME:
return RexSubQuery.some(
root.rel,
list,
(SqlQuantifyOperator) call.getOperator());
case ALL:
return rexBuilder.makeCall(
SqlStdOperatorTable.NOT,
RexSubQuery.some(
root.rel,
list,
negate(
(SqlQuantifyOperator)
call.getOperator())));
default:
throw new AssertionError(kind);
}
}
break;
case EXISTS:
call = (SqlCall) expr;
query = Iterables.getOnlyElement(call.getOperandList());
root = convertQueryRecursive(query, false, null);
RelNode rel = root.rel;
while (rel instanceof Project
|| rel instanceof Sort
&& ((Sort) rel).fetch == null
&& ((Sort) rel).offset == null) {
rel = ((SingleRel) rel).getInput();
}
return RexSubQuery.exists(rel);
case UNIQUE:
call = (SqlCall) expr;
query = Iterables.getOnlyElement(call.getOperandList());
root = convertQueryRecursive(query, false, null);
return RexSubQuery.unique(root.rel);
case SCALAR_QUERY:
call = (SqlCall) expr;
query = Iterables.getOnlyElement(call.getOperandList());
root = convertQueryRecursive(query, false, null);
return RexSubQuery.scalar(root.rel);
case ARRAY_QUERY_CONSTRUCTOR:
call = (SqlCall) expr;
query = Iterables.getOnlyElement(call.getOperandList());
root = convertQueryRecursive(query, false, null);
return RexSubQuery.array(root.rel);
case MAP_QUERY_CONSTRUCTOR:
call = (SqlCall) expr;
query = Iterables.getOnlyElement(call.getOperandList());
root = convertQueryRecursive(query, false, null);
return RexSubQuery.map(root.rel);
case MULTISET_QUERY_CONSTRUCTOR:
call = (SqlCall) expr;
query = Iterables.getOnlyElement(call.getOperandList());
root = convertQueryRecursive(query, false, null);
return RexSubQuery.multiset(root.rel);
default:
break;
}
}
switch (kind) {
case SOME:
case ALL:
case UNIQUE:
if (config.isExpand()) {
throw new RuntimeException(kind + " is only supported if expand = false");
}
// fall through
case CURSOR:
case IN:
case NOT_IN:
subQuery = getSubQuery(expr, null);
if (subQuery == null && (kind == SqlKind.SOME || kind == SqlKind.ALL)) {
break;
}
assert subQuery != null;
rex = requireNonNull(subQuery.expr);
return StandardConvertletTable.castToValidatedType(
expr, rex, validator(), rexBuilder, false);
case SELECT:
case EXISTS:
case SCALAR_QUERY:
case ARRAY_QUERY_CONSTRUCTOR:
case MAP_QUERY_CONSTRUCTOR:
case MULTISET_QUERY_CONSTRUCTOR:
subQuery = getSubQuery(expr, null);
assert subQuery != null;
rex = subQuery.expr;
assert rex != null : "rex != null";
if (((kind == SqlKind.SCALAR_QUERY) || (kind == SqlKind.EXISTS))
&& isConvertedSubq(rex)) {
// scalar sub-query or EXISTS has been converted to a
// constant
return rex;
}
// The indicator column is the last field of the sub-query.
RexNode fieldAccess =
rexBuilder.makeFieldAccess(rex, rex.getType().getFieldCount() - 1);
// The indicator column will be nullable if it comes from
// the null-generating side of the join. For EXISTS, add an
// "IS TRUE" check so that the result is "BOOLEAN NOT NULL".
if (fieldAccess.getType().isNullable() && kind == SqlKind.EXISTS) {
fieldAccess =
rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL, fieldAccess);
}
return fieldAccess;
case OVER:
return convertOver(this, expr);
default:
// fall through
}
// Apply standard conversions.
rex = expr.accept(this);
return requireNonNull(rex, "rex");
}
/**
* Converts an item in an ORDER BY clause inside a window (OVER) clause, extracting DESC,
* NULLS LAST and NULLS FIRST flags first.
*/
@Deprecated // to be removed before 2.0
public RexFieldCollation convertSortExpression(
SqlNode expr,
RelFieldCollation.Direction direction,
RelFieldCollation.NullDirection nullDirection) {
return convertSortExpression(
expr, direction, nullDirection, this::sortToRexFieldCollation);
}
/**
* Handles an item in an ORDER BY clause, passing using a converter function to produce the
* final result.
*/
<R> R convertSortExpression(
SqlNode expr,
RelFieldCollation.Direction direction,
RelFieldCollation.NullDirection nullDirection,
SortExpressionConverter<R> converter) {
switch (expr.getKind()) {
case DESCENDING:
return convertSortExpression(
((SqlCall) expr).operand(0),
RelFieldCollation.Direction.DESCENDING,
nullDirection,
converter);
case NULLS_LAST:
return convertSortExpression(
((SqlCall) expr).operand(0),
direction,
RelFieldCollation.NullDirection.LAST,
converter);
case NULLS_FIRST:
return convertSortExpression(
((SqlCall) expr).operand(0),
direction,
RelFieldCollation.NullDirection.FIRST,
converter);
default:
return converter.convert(expr, direction, nullDirection);
}
}
// Only used by deprecated method "convertSortExpression", and will be
// removed with that method.
private RexFieldCollation sortToRexFieldCollation(
SqlNode expr,
RelFieldCollation.Direction direction,
RelFieldCollation.NullDirection nullDirection) {
final Set<SqlKind> flags = EnumSet.noneOf(SqlKind.class);
if (direction == RelFieldCollation.Direction.DESCENDING) {
flags.add(SqlKind.DESCENDING);
}
switch (nullDirection) {
case UNSPECIFIED:
final RelFieldCollation.NullDirection nullDefaultDirection =
validator().config().defaultNullCollation().last(desc(direction))
? RelFieldCollation.NullDirection.LAST
: RelFieldCollation.NullDirection.FIRST;
if (nullDefaultDirection != direction.defaultNullDirection()) {
SqlKind nullDirectionSqlKind =
validator().config().defaultNullCollation().last(desc(direction))
? SqlKind.NULLS_LAST
: SqlKind.NULLS_FIRST;
flags.add(nullDirectionSqlKind);
}
break;
case FIRST:
flags.add(SqlKind.NULLS_FIRST);
break;
case LAST:
flags.add(SqlKind.NULLS_LAST);
break;
default:
break;
}
return new RexFieldCollation(convertExpression(expr), flags);
}
private RexNode sortToRex(
SqlNode expr,
RelFieldCollation.Direction direction,
RelFieldCollation.NullDirection nullDirection) {
RexNode node = convertExpression(expr);
final boolean desc = direction == RelFieldCollation.Direction.DESCENDING;
if (desc) {
node = relBuilder.desc(node);
}
if (nullDirection == RelFieldCollation.NullDirection.UNSPECIFIED) {
final NullCollation nullCollation = validator().config().defaultNullCollation();
final boolean nullsLast = nullCollation.last(desc);
final boolean nullsFirst = !nullsLast;
if (!NullCollation.HIGH.isDefaultOrder(nullsFirst, desc)) {
nullDirection =
nullsLast
? RelFieldCollation.NullDirection.LAST
: RelFieldCollation.NullDirection.FIRST;
}
}
if (nullDirection == RelFieldCollation.NullDirection.FIRST) {
node = relBuilder.nullsFirst(node);
}
if (nullDirection == RelFieldCollation.NullDirection.LAST) {
node = relBuilder.nullsLast(node);
}
return node;
}
/**
* Determines whether a RexNode corresponds to a sub-query that's been converted to a
* constant.
*
* @param rex the expression to be examined
* @return true if the expression is a dynamic parameter, a literal, or a literal that is
* being cast
*/
private boolean isConvertedSubq(RexNode rex) {
if ((rex instanceof RexLiteral) || (rex instanceof RexDynamicParam)) {
return true;
}
if (rex instanceof RexCall) {
RexCall call = (RexCall) rex;
if (call.getOperator() == SqlStdOperatorTable.CAST) {
RexNode operand = call.getOperands().get(0);
if (operand instanceof RexLiteral) {
return true;
}
}
}
return false;
}
@Override
public int getGroupCount() {
if (agg != null) {
return agg.groupExprs.size();
}
if (window != null) {
return window.isAlwaysNonEmpty() ? 1 : 0;
}
return -1;
}
@Override
public RexBuilder getRexBuilder() {
return rexBuilder;
}
@Override
public SqlNode validateExpression(RelDataType rowType, SqlNode expr) {
return SqlValidatorUtil.validateExprWithRowType(
catalogReader.nameMatcher().isCaseSensitive(),
opTab,
typeFactory,
rowType,
expr)
.left;
}
@Override
public RexRangeRef getSubQueryExpr(SqlCall call) {
final SubQuery subQuery = getSubQuery(call, null);
assert subQuery != null;
return (RexRangeRef) requireNonNull(subQuery.expr, () -> "subQuery.expr for " + call);
}
@Override
public RelDataTypeFactory getTypeFactory() {
return typeFactory;
}
@Override
public InitializerExpressionFactory getInitializerExpressionFactory() {
return initializerExpressionFactory;
}
@Override
public SqlValidator getValidator() {
return validator();
}
@Override
public RexNode convertLiteral(SqlLiteral literal) {
return exprConverter.convertLiteral(this, literal);
}
public RexNode convertInterval(SqlIntervalQualifier intervalQualifier) {
return exprConverter.convertInterval(this, intervalQualifier);
}
@Override
public RexNode visit(SqlLiteral literal) {
return exprConverter.convertLiteral(this, literal);
}
@Override
public RexNode visit(SqlCall call) {
if (agg != null) {
final SqlOperator op = call.getOperator();
if (window == null
&& (op.isAggregator()
|| op.getKind() == SqlKind.FILTER
|| op.getKind() == SqlKind.WITHIN_DISTINCT
|| op.getKind() == SqlKind.WITHIN_GROUP)) {
return requireNonNull(
agg.lookupAggregates(call),
() -> "agg.lookupAggregates for call " + call);
}
}
// ----- FLINK MODIFICATION BEGIN -----
final SqlCall permutedCall =
new FlinkSqlCallBinding(validator(), scope, call).permutedCall();
final RelDataType typeIfKnown = validator().getValidatedNodeTypeIfKnown(call);
if (typeIfKnown != null) {
// Argument permutation should not affect the output type,
// reset it if it was known. Otherwise, the type inference would be called twice
// when converting to RexNode.
validator().setValidatedNodeType(permutedCall, typeIfKnown);
}
return exprConverter.convertCall(this, permutedCall);
// ----- FLINK MODIFICATION END -----
}
@Override
public RexNode visit(SqlNodeList nodeList) {
throw new UnsupportedOperationException();
}
@Override
public RexNode visit(SqlIdentifier id) {
return convertIdentifier(this, id);
}
@Override
public RexNode visit(SqlDataTypeSpec type) {
throw new UnsupportedOperationException();
}
@Override
public RexNode visit(SqlDynamicParam param) {
return convertDynamicParam(param);
}
@Override
public RexNode visit(SqlIntervalQualifier intervalQualifier) {
return convertInterval(intervalQualifier);
}
public List<SqlMonotonicity> getColumnMonotonicities() {
return columnMonotonicities;
}
}
private static SqlQuantifyOperator negate(SqlQuantifyOperator operator) {
assert operator.kind == SqlKind.ALL;
return SqlStdOperatorTable.some(operator.comparisonKind.negateNullSafe());
}
/** Deferred lookup. */
private static | chance |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/PooledObjectFactory.java | {
"start": 1108,
"end": 3046
} | interface ____ {
/**
* Number of new exchanges created.
*/
long getCreatedCounter();
/**
* Number of exchanges acquired (reused) when using pooled factory.
*/
long getAcquiredCounter();
/**
* Number of exchanges released back to pool
*/
long getReleasedCounter();
/**
* Number of exchanges discarded (thrown away) such as if no space in cache pool.
*/
long getDiscardedCounter();
/**
* Reset the counters
*/
void reset();
}
/**
* The current number of objects in the pool
*/
int getSize();
/**
* The capacity the pool uses for storing objects. The default capacity is 100.
*/
int getCapacity();
/**
* The capacity the pool uses for storing objects. The default capacity is 100.
*/
void setCapacity(int capacity);
/**
* Whether statistics is enabled.
*/
boolean isStatisticsEnabled();
/**
* Whether statistics is enabled.
*/
void setStatisticsEnabled(boolean statisticsEnabled);
/**
* Reset the statistics
*/
void resetStatistics();
/**
* Purges the internal cache (if pooled)
*/
void purge();
/**
* Gets the usage statistics
*
* @return the statistics, or null if statistics is not enabled
*/
Statistics getStatistics();
/**
* Whether the factory is pooled.
*/
boolean isPooled();
/**
* Acquires an object from the pool (if any)
*
* @return the object or <tt>null</tt> if the pool is empty
*/
T acquire();
/**
* Releases the object back to the pool
*
* @param t the object
* @return true if released into the pool, or false if something went wrong and the object was discarded
*/
boolean release(T t);
}
| Statistics |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/AllocationStatusTests.java | {
"start": 570,
"end": 1992
} | class ____ extends AbstractXContentSerializingTestCase<AllocationStatus> {
public static AllocationStatus randomInstance() {
return new AllocationStatus(randomInt(10), randomIntBetween(1, 10));
}
@Override
protected AllocationStatus doParseInstance(XContentParser parser) throws IOException {
return AllocationStatus.fromXContent(parser);
}
@Override
protected Writeable.Reader<AllocationStatus> instanceReader() {
return AllocationStatus::new;
}
@Override
protected AllocationStatus createTestInstance() {
return randomInstance();
}
@Override
protected AllocationStatus mutateInstance(AllocationStatus instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
public void testCalculateState() {
int targetAllocation = randomIntBetween(2, 10);
assertThat(
new AllocationStatus(randomIntBetween(1, targetAllocation - 1), targetAllocation).calculateState(),
equalTo(AllocationStatus.State.STARTED)
);
assertThat(new AllocationStatus(0, targetAllocation).calculateState(), equalTo(AllocationStatus.State.STARTING));
assertThat(
new AllocationStatus(targetAllocation, targetAllocation).calculateState(),
equalTo(AllocationStatus.State.FULLY_ALLOCATED)
);
}
}
| AllocationStatusTests |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java | {
"start": 12107,
"end": 13821
} | class ____ extends ChildLbState {
private final Set<WrrSubchannel> subchannels = new HashSet<>();
private volatile long lastUpdated;
private volatile long nonEmptySince;
private volatile double weight = 0;
private OrcaReportListener orcaReportListener;
public WeightedChildLbState(Object key, LoadBalancerProvider policyProvider) {
super(key, policyProvider);
}
@Override
protected ChildLbStateHelper createChildHelper() {
return new WrrChildLbStateHelper();
}
private double getWeight(AtomicInteger staleEndpoints, AtomicInteger notYetUsableEndpoints) {
if (config == null) {
return 0;
}
long now = ticker.nanoTime();
if (now - lastUpdated >= config.weightExpirationPeriodNanos) {
nonEmptySince = infTime;
staleEndpoints.incrementAndGet();
return 0;
} else if (now - nonEmptySince < config.blackoutPeriodNanos
&& config.blackoutPeriodNanos > 0) {
notYetUsableEndpoints.incrementAndGet();
return 0;
} else {
return weight;
}
}
public void addSubchannel(WrrSubchannel wrrSubchannel) {
subchannels.add(wrrSubchannel);
}
public OrcaReportListener getOrCreateOrcaListener(float errorUtilizationPenalty) {
if (orcaReportListener != null
&& orcaReportListener.errorUtilizationPenalty == errorUtilizationPenalty) {
return orcaReportListener;
}
orcaReportListener = new OrcaReportListener(errorUtilizationPenalty);
return orcaReportListener;
}
public void removeSubchannel(WrrSubchannel wrrSubchannel) {
subchannels.remove(wrrSubchannel);
}
final | WeightedChildLbState |
java | quarkusio__quarkus | extensions/spring-web/resteasy-classic/tests/src/test/java/io/quarkus/spring/web/resteasy/classic/test/ResponseStatusAndExceptionHandlerTest.java | {
"start": 2070,
"end": 2760
} | class ____ {
@ExceptionHandler(RuntimeException.class)
public ResponseEntity<Object> handleException(Exception ex) {
return new ResponseEntity<>(HttpStatus.BAD_REQUEST);
}
@ExceptionHandler(IllegalArgumentException.class)
public ResponseEntity<Object> handleException(Exception ex, HttpServletRequest request, HttpServletResponse response) {
response.setStatus(HttpStatus.INTERNAL_SERVER_ERROR.value());
request.setAttribute("javax.servlet.error.status_code", HttpStatus.INTERNAL_SERVER_ERROR.value());
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
}
| RestExceptionHandler |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/jinaai/JinaAIModel.java | {
"start": 1042,
"end": 2374
} | class ____ extends Model {
private final SecureString apiKey;
private final JinaAIRateLimitServiceSettings rateLimitServiceSettings;
public JinaAIModel(
ModelConfigurations configurations,
ModelSecrets secrets,
@Nullable ApiKeySecrets apiKeySecrets,
JinaAIRateLimitServiceSettings rateLimitServiceSettings
) {
super(configurations, secrets);
this.rateLimitServiceSettings = Objects.requireNonNull(rateLimitServiceSettings);
apiKey = ServiceUtils.apiKey(apiKeySecrets);
}
protected JinaAIModel(JinaAIModel model, TaskSettings taskSettings) {
super(model, taskSettings);
rateLimitServiceSettings = model.rateLimitServiceSettings();
apiKey = model.apiKey();
}
protected JinaAIModel(JinaAIModel model, ServiceSettings serviceSettings) {
super(model, serviceSettings);
rateLimitServiceSettings = model.rateLimitServiceSettings();
apiKey = model.apiKey();
}
public SecureString apiKey() {
return apiKey;
}
public JinaAIRateLimitServiceSettings rateLimitServiceSettings() {
return rateLimitServiceSettings;
}
public abstract ExecutableAction accept(JinaAIActionVisitor creator, Map<String, Object> taskSettings);
public abstract URI uri();
}
| JinaAIModel |
java | apache__camel | components/camel-xmlsecurity/src/main/java/org/apache/camel/component/xmlsecurity/api/XmlSignatureHelper.java | {
"start": 2392,
"end": 6742
} | class ____ {
private XmlSignatureHelper() {
// Helper class
}
/**
* Returns a configuration for a canonicalization algorithm.
*
* @param algorithm algorithm URI
* @return canonicalization
* @throws IllegalArgumentException if <tt>algorithm</tt> is <code>null</code>
*/
public static AlgorithmMethod getCanonicalizationMethod(String algorithm) {
return getCanonicalizationMethod(algorithm, null);
}
/**
* Returns a configuration for a canonicalization algorithm.
*
* @param algorithm algorithm URI
* @param inclusiveNamespacePrefixes namespace prefixes which should be treated like in the inclusive
* canonicalization, only relevant if the algorithm is exclusive
* @return canonicalization
* @throws IllegalArgumentException if <tt>algorithm</tt> is <code>null</code>
*/
public static AlgorithmMethod getCanonicalizationMethod(String algorithm, List<String> inclusiveNamespacePrefixes) {
if (algorithm == null) {
throw new IllegalArgumentException("algorithm is null");
}
XmlSignatureTransform canonicalizationMethod = new XmlSignatureTransform(algorithm);
if (inclusiveNamespacePrefixes != null) {
ExcC14NParameterSpec parameters = new ExcC14NParameterSpec(inclusiveNamespacePrefixes);
canonicalizationMethod.setParameterSpec(parameters);
}
return canonicalizationMethod;
}
public static AlgorithmMethod getEnvelopedTransform() {
return new XmlSignatureTransform(Transform.ENVELOPED);
}
/**
* Returns a configuration for a base64 transformation.
*
* @return Base64 transformation
*/
public static AlgorithmMethod getBase64Transform() {
return new XmlSignatureTransform(Transform.BASE64);
}
/**
* Returns a configuration for an XPATH transformation.
*
* @param xpath XPATH expression
* @return XPATH transformation
* @throws IllegalArgumentException if <tt>xpath</tt> is <code>null</code>
*/
public static AlgorithmMethod getXPathTransform(String xpath) {
return getXPathTransform(xpath, null);
}
/**
* Returns a configuration for an XPATH transformation which needs a namespace map.
*
* @param xpath XPATH expression
* @param namespaceMap namespace map, key is the prefix, value is the namespace, can be
* <code>null</code>
* @throws IllegalArgumentException if <tt>xpath</tt> is <code>null</code>
* @return XPATH transformation
*/
public static AlgorithmMethod getXPathTransform(String xpath, Map<String, String> namespaceMap) {
if (xpath == null) {
throw new IllegalArgumentException("xpath is null");
}
XmlSignatureTransform transformXPath = new XmlSignatureTransform();
transformXPath.setAlgorithm(Transform.XPATH);
XPathFilterParameterSpec params = getXpathFilter(xpath, namespaceMap);
transformXPath.setParameterSpec(params);
return transformXPath;
}
public static XPathFilterParameterSpec getXpathFilter(String xpath, Map<String, String> namespaceMap) {
XPathFilterParameterSpec params = namespaceMap == null
? new XPathFilterParameterSpec(xpath) : new XPathFilterParameterSpec(
xpath,
namespaceMap);
return params;
}
public static XPathFilterParameterSpec getXpathFilter(String xpath) {
return getXpathFilter(xpath, null);
}
@SuppressWarnings("unchecked")
public static XPathExpression getXPathExpression(XPathFilterParameterSpec xpathFilter) throws XPathExpressionException {
XPathFactory factory = XPathFactory.newInstance();
XPath xpath = factory.newXPath();
if (xpathFilter.getNamespaceMap() != null) {
xpath.setNamespaceContext(new XPathNamespaceContext(xpathFilter.getNamespaceMap()));
}
return xpath.compile(xpathFilter.getXPath());
}
private static | XmlSignatureHelper |
java | apache__dubbo | dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/client/metadata/ServiceInstanceMetadataUtils.java | {
"start": 2910,
"end": 3141
} | class ____ the {@link ServiceInstance#getMetadata() metadata of the service instance}
*
* @see StandardMetadataServiceURLBuilder
* @see ServiceInstance#getMetadata()
* @see MetadataService
* @see URL
* @since 2.7.5
*/
public | for |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java | {
"start": 73424,
"end": 75475
} | class ____ implements Runnable {
private final Logger logger;
private final TaskInvokable invokable;
private final Thread executor;
private final String taskName;
TaskCanceler(Logger logger, TaskInvokable invokable, Thread executor, String taskName) {
this.logger = logger;
this.invokable = invokable;
this.executor = executor;
this.taskName = taskName;
}
@Override
public void run() {
try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) {
// the user-defined cancel method may throw errors.
// we need do continue despite that
try {
invokable.cancel();
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalError(t);
logger.error("Error while canceling the task {}.", taskName, t);
}
// Early release of input and output buffer pools. We do this
// in order to unblock async Threads, which produce/consume the
// intermediate streams outside of the main Task Thread (like
// the Kafka consumer).
// Notes: 1) This does not mean to release all network resources,
// the task thread itself will release them; 2) We can not close
// ResultPartitions here because of possible race conditions with
// Task thread so we just call the fail here.
failAllResultPartitions();
closeAllInputGates();
invokable.maybeInterruptOnCancel(executor, null, null);
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalError(t);
logger.error("Error in the task canceler for task {}.", taskName, t);
}
}
}
/** This thread sends the delayed, periodic interrupt calls to the executing thread. */
private static final | TaskCanceler |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java | {
"start": 19191,
"end": 19373
} | interface ____ {
public String getTestString();
}
""";
String implementingClass = """
package p;
public | MyService |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/BannerTests.java | {
"start": 1847,
"end": 4184
} | class ____ {
private @Nullable ConfigurableApplicationContext context;
@AfterEach
void cleanUp() {
if (this.context != null) {
this.context.close();
}
}
@Captor
@SuppressWarnings("NullAway.Init")
private ArgumentCaptor<Class<?>> sourceClassCaptor;
@Test
void testDefaultBanner(CapturedOutput output) {
SpringApplication application = createSpringApplication();
this.context = application.run();
assertThat(output).contains(":: Spring Boot ::");
}
@Test
void testDefaultBannerInLog(CapturedOutput output) {
SpringApplication application = createSpringApplication();
this.context = application.run();
assertThat(output).contains(":: Spring Boot ::");
}
@Test
void testCustomBanner(CapturedOutput output) {
SpringApplication application = createSpringApplication();
application.setBanner(new DummyBanner());
this.context = application.run();
assertThat(output).contains("My Banner");
}
@Test
void testBannerInContext() {
SpringApplication application = createSpringApplication();
this.context = application.run();
assertThat(this.context.containsBean("springBootBanner")).isTrue();
}
@Test
void testCustomBannerInContext() {
SpringApplication application = createSpringApplication();
Banner banner = mock(Banner.class);
application.setBanner(banner);
this.context = application.run();
Banner printedBanner = (Banner) this.context.getBean("springBootBanner");
assertThat(printedBanner).hasFieldOrPropertyWithValue("banner", banner);
then(banner).should()
.printBanner(any(Environment.class), this.sourceClassCaptor.capture(), any(PrintStream.class));
reset(banner);
printedBanner.printBanner(this.context.getEnvironment(), null, System.out);
then(banner).should()
.printBanner(any(Environment.class), eq(this.sourceClassCaptor.getValue()), any(PrintStream.class));
}
@Test
void testDisableBannerInContext() {
SpringApplication application = createSpringApplication();
application.setBannerMode(Mode.OFF);
this.context = application.run();
assertThat(this.context.containsBean("springBootBanner")).isFalse();
}
private SpringApplication createSpringApplication() {
SpringApplication application = new SpringApplication(Config.class);
application.setWebApplicationType(WebApplicationType.NONE);
return application;
}
static | BannerTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/boot/models/xml/attr/ManyToOneTests.java | {
"start": 1656,
"end": 4473
} | class ____ {
@Test
@SuppressWarnings("JUnitMalformedDeclaration")
void testSimpleManyToOne(ServiceRegistryScope scope) {
final StandardServiceRegistry serviceRegistry = scope.getRegistry();
final ManagedResources managedResources = new AdditionalManagedResourcesImpl.Builder()
.addXmlMappings( "mappings/models/attr/many-to-one/simple.xml" )
.build();
final ModelsContext ModelsContext = createBuildingContext( managedResources, serviceRegistry );
final ClassDetailsRegistry classDetailsRegistry = ModelsContext.getClassDetailsRegistry();
final ClassDetails classDetails = classDetailsRegistry.getClassDetails( SimpleEntity.class.getName() );
final FieldDetails parentField = classDetails.findFieldByName( "parent" );
final ManyToOne manyToOneAnn = parentField.getDirectAnnotationUsage( ManyToOne.class );
assertThat( manyToOneAnn ).isNotNull();
final JoinColumnsOrFormulas joinColumnsOrFormulas = parentField.getDirectAnnotationUsage( JoinColumnsOrFormulas.class );
assertThat( joinColumnsOrFormulas.value() ).hasSize( 1 );
final JoinColumnOrFormula joinColumnOrFormula = joinColumnsOrFormulas.value()[0];
assertThat( joinColumnOrFormula.formula() ).isNotNull();
assertThat( joinColumnOrFormula.formula().value() ).isNull();
final JoinColumn joinColumnAnn = joinColumnOrFormula.column();
assertThat( joinColumnAnn ).isNotNull();
assertThat( joinColumnAnn.name() ).isEqualTo( "parent_fk" );
final NotFound notFoundAnn = parentField.getDirectAnnotationUsage( NotFound.class );
assertThat( notFoundAnn ).isNotNull();
assertThat( notFoundAnn.action() ).isEqualTo( NotFoundAction.IGNORE );
final OnDelete onDeleteAnn = parentField.getDirectAnnotationUsage( OnDelete.class );
assertThat( onDeleteAnn ).isNotNull();
assertThat( onDeleteAnn.action() ).isEqualTo( OnDeleteAction.CASCADE );
final Fetch fetchAnn = parentField.getDirectAnnotationUsage( Fetch.class );
assertThat( fetchAnn ).isNotNull();
assertThat( fetchAnn.value() ).isEqualTo( FetchMode.SELECT );
final OptimisticLock optLockAnn = parentField.getDirectAnnotationUsage( OptimisticLock.class );
assertThat( optLockAnn ).isNotNull();
assertThat( optLockAnn.excluded() ).isTrue();
final Target targetAnn = parentField.getDirectAnnotationUsage( Target.class );
assertThat( targetAnn ).isNotNull();
assertThat( targetAnn.value() ).isEqualTo( "org.hibernate.orm.test.boot.models.xml.attr.ManyToOneTests$SimpleEntity" );
final Cascade cascadeAnn = parentField.getDirectAnnotationUsage( Cascade.class );
final CascadeType[] cascadeTypes = cascadeAnn.value();
assertThat( cascadeTypes ).isNotEmpty();
assertThat( cascadeTypes ).containsOnly( CascadeType.ALL );
}
@SuppressWarnings("unused")
@Entity(name="SimpleEntity")
@Table(name="SimpleEntity")
public static | ManyToOneTests |
java | elastic__elasticsearch | libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/NamedComponentScannerTests.java | {
"start": 2715,
"end": 3019
} | class ____ extends ExtensibleClass {}
"""), "p/B.class", InMemoryJavaCompiler.compile("p.B", """
package p;
import org.elasticsearch.plugin.*;
import org.elasticsearch.plugin.scanner.test_model.*;
@NamedComponent("b_component")
public | A |
java | apache__maven | api/maven-api-core/src/main/java/org/apache/maven/api/services/ArtifactFactoryRequest.java | {
"start": 2700,
"end": 4444
} | class ____ {
private Session session;
private RequestTrace trace;
private String groupId;
private String artifactId;
private String version;
private String classifier;
private String extension;
private String type;
ArtifactFactoryRequestBuilder() {}
public ArtifactFactoryRequestBuilder session(Session session) {
this.session = session;
return this;
}
public ArtifactFactoryRequestBuilder trace(RequestTrace trace) {
this.trace = trace;
return this;
}
public ArtifactFactoryRequestBuilder groupId(String groupId) {
this.groupId = groupId;
return this;
}
public ArtifactFactoryRequestBuilder artifactId(String artifactId) {
this.artifactId = artifactId;
return this;
}
public ArtifactFactoryRequestBuilder version(String version) {
this.version = version;
return this;
}
public ArtifactFactoryRequestBuilder classifier(String classifier) {
this.classifier = classifier;
return this;
}
public ArtifactFactoryRequestBuilder extension(String extension) {
this.extension = extension;
return this;
}
public ArtifactFactoryRequestBuilder type(String type) {
this.type = type;
return this;
}
public ArtifactFactoryRequest build() {
return new DefaultArtifactFactoryRequest(
session, trace, groupId, artifactId, version, classifier, extension, type);
}
private static | ArtifactFactoryRequestBuilder |
java | micronaut-projects__micronaut-core | http-client-core/src/main/java/io/micronaut/http/client/multipart/Part.java | {
"start": 885,
"end": 1577
} | class ____<D> {
/**
* Name of the parameter in Multipart request body.
*/
protected final String name;
/**
* @param name Name of the parameter
*/
Part(String name) {
if (name == null) {
throw new IllegalArgumentException("Adding parts with a null name is not allowed");
}
this.name = name;
}
/**
* @return The content of this part.
*/
abstract D getContent();
/**
* @param factory The factory used to create the multipart data
* @return The multi part data object
* @param <T> The data
*/
abstract @NonNull <T> T getData(@NonNull MultipartDataFactory<T> factory);
}
| Part |
java | elastic__elasticsearch | x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java | {
"start": 1616,
"end": 6965
} | class ____ extends CcrIntegTestCase {
private Thread.UncaughtExceptionHandler uncaughtExceptionHandler;
@Before
public void wrapUncaughtExceptionHandler() {
CloseFollowerIndexErrorSuppressionHelper.setSuppressCreateEngineErrors(true);
uncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler();
AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
Thread.setDefaultUncaughtExceptionHandler((t, e) -> {
if (t.getThreadGroup().getName().contains(getTestClass().getSimpleName())
&& t.getName().equals("elasticsearch-error-rethrower")) {
for (StackTraceElement element : e.getStackTrace()) {
if (element.getClassName().equals(ReadOnlyEngine.class.getName())) {
if (element.getMethodName().equals("assertMaxSeqNoEqualsToGlobalCheckpoint")) {
logger.error("HACK: suppressing uncaught exception thrown from assertMaxSeqNoEqualsToGlobalCheckpoint", e);
return;
}
}
}
}
uncaughtExceptionHandler.uncaughtException(t, e);
});
return null;
});
}
@After
public void restoreUncaughtExceptionHandler() {
CloseFollowerIndexErrorSuppressionHelper.setSuppressCreateEngineErrors(false);
AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler);
return null;
});
}
public void testCloseAndReopenFollowerIndex() throws Exception {
final String leaderIndexSettings = getIndexSettings(1, 1);
assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON));
ensureLeaderYellow("index1");
PutFollowAction.Request followRequest = new PutFollowAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
followRequest.setRemoteCluster("leader_cluster");
followRequest.setLeaderIndex("index1");
followRequest.setFollowerIndex("index2");
followRequest.getParameters().setMaxRetryDelay(TimeValue.timeValueMillis(10));
followRequest.getParameters().setReadPollTimeout(TimeValue.timeValueMillis(10));
followRequest.getParameters().setMaxReadRequestSize(ByteSizeValue.ofBytes(1));
followRequest.getParameters().setMaxOutstandingReadRequests(128);
followRequest.waitForActiveShards(ActiveShardCount.DEFAULT);
followerClient().execute(PutFollowAction.INSTANCE, followRequest).get();
ensureFollowerGreen("index2");
AtomicBoolean isRunning = new AtomicBoolean(true);
int numThreads = 4;
Thread[] threads = new Thread[numThreads];
for (int i = 0; i < numThreads; i++) {
threads[i] = new Thread(() -> {
while (isRunning.get()) {
leaderClient().prepareIndex("index1").setSource("{}", XContentType.JSON).get();
}
});
threads[i].start();
}
atLeastDocsIndexed(followerClient(), "index2", 32);
CloseIndexRequest closeIndexRequest = new CloseIndexRequest("index2").masterNodeTimeout(TimeValue.MAX_VALUE);
closeIndexRequest.waitForActiveShards(ActiveShardCount.NONE);
AcknowledgedResponse response = followerClient().admin().indices().close(closeIndexRequest).get();
assertThat(response.isAcknowledged(), is(true));
ClusterState clusterState = followerClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
assertThat(clusterState.metadata().getProject().index("index2").getState(), is(IndexMetadata.State.CLOSE));
assertThat(
clusterState.getBlocks().hasIndexBlock(Metadata.DEFAULT_PROJECT_ID, "index2", MetadataIndexStateService.INDEX_CLOSED_BLOCK),
is(true)
);
isRunning.set(false);
for (Thread thread : threads) {
thread.join();
}
assertAcked(followerClient().admin().indices().open(new OpenIndexRequest("index2").masterNodeTimeout(TimeValue.MAX_VALUE)).get());
clusterState = followerClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
assertThat(clusterState.metadata().getProject().index("index2").getState(), is(IndexMetadata.State.OPEN));
assertThat(
clusterState.getBlocks()
.hasIndexBlockWithId(Metadata.DEFAULT_PROJECT_ID, "index2", MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID),
is(false)
);
ensureFollowerGreen("index2");
refresh(leaderClient(), "index1");
long leaderIndexDocs = SearchResponseUtils.getTotalHitsValue(leaderClient().prepareSearch("index1").setTrackTotalHits(true));
assertBusy(() -> {
refresh(followerClient(), "index2");
long followerIndexDocs = SearchResponseUtils.getTotalHitsValue(
followerClient().prepareSearch("index2").setTrackTotalHits(true)
);
assertThat(followerIndexDocs, equalTo(leaderIndexDocs));
}, 30L, TimeUnit.SECONDS);
}
}
| CloseFollowerIndexIT |
java | google__guava | android/guava-tests/test/com/google/common/collect/TreeBasedTableTest.java | {
"start": 1798,
"end": 15458
} | class ____ extends AbstractTableTest<Character> {
@J2ktIncompatible
@GwtIncompatible // suite
@AndroidIncompatible // test-suite builders
public static Test suite() {
TestSuite suite = new TestSuite();
suite.addTestSuite(TreeBasedTableTest.class);
suite.addTest(
SortedMapTestSuiteBuilder.using(
new TestStringSortedMapGenerator() {
@Override
protected SortedMap<String, String> create(Entry<String, String>[] entries) {
TreeBasedTable<String, String, String> table = TreeBasedTable.create();
table.put("a", "b", "c");
table.put("c", "b", "a");
table.put("a", "a", "d");
for (Entry<String, String> entry : entries) {
table.put("b", entry.getKey(), entry.getValue());
}
return table.row("b");
}
})
.withFeatures(
MapFeature.GENERAL_PURPOSE,
CollectionFeature.SUPPORTS_ITERATOR_REMOVE,
CollectionSize.ANY)
.named("RowMapTestSuite")
.createTestSuite());
return suite;
}
private TreeBasedTable<String, Integer, Character> sortedTable;
protected TreeBasedTable<String, Integer, Character> create(
Comparator<? super String> rowComparator,
Comparator<? super Integer> columnComparator,
Object... data) {
TreeBasedTable<String, Integer, Character> table =
TreeBasedTable.create(rowComparator, columnComparator);
table.put("foo", 4, 'a');
table.put("cat", 1, 'b');
table.clear();
populate(table, data);
return table;
}
@Override
protected TreeBasedTable<String, Integer, Character> create(@Nullable Object... data) {
TreeBasedTable<String, Integer, Character> table = TreeBasedTable.create();
table.put("foo", 4, 'a');
table.put("cat", 1, 'b');
table.clear();
populate(table, data);
return table;
}
public void testCreateExplicitComparators() {
table = TreeBasedTable.create(Collections.reverseOrder(), Ordering.usingToString());
table.put("foo", 3, 'a');
table.put("foo", 12, 'b');
table.put("bar", 5, 'c');
table.put("cat", 8, 'd');
assertThat(table.rowKeySet()).containsExactly("foo", "cat", "bar").inOrder();
assertThat(table.row("foo").keySet()).containsExactly(12, 3).inOrder();
}
public void testCreateCopy() {
TreeBasedTable<String, Integer, Character> original =
TreeBasedTable.create(Collections.reverseOrder(), Ordering.usingToString());
original.put("foo", 3, 'a');
original.put("foo", 12, 'b');
original.put("bar", 5, 'c');
original.put("cat", 8, 'd');
table = TreeBasedTable.create(original);
assertThat(table.rowKeySet()).containsExactly("foo", "cat", "bar").inOrder();
assertThat(table.row("foo").keySet()).containsExactly(12, 3).inOrder();
assertEquals(original, table);
}
@J2ktIncompatible
@GwtIncompatible // SerializableTester
public void testSerialization() {
table = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
SerializableTester.reserializeAndAssert(table);
}
public void testToString_ordered() {
table = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
assertEquals("{bar={1=b}, foo={1=a, 3=c}}", table.toString());
assertEquals("{bar={1=b}, foo={1=a, 3=c}}", table.rowMap().toString());
}
public void testCellSetToString_ordered() {
table = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
assertEquals("[(bar,1)=b, (foo,1)=a, (foo,3)=c]", table.cellSet().toString());
}
public void testRowKeySetToString_ordered() {
table = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
assertEquals("[bar, foo]", table.rowKeySet().toString());
}
public void testValuesToString_ordered() {
table = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
assertEquals("[b, a, c]", table.values().toString());
}
@SuppressWarnings({"deprecation", "InlineMeInliner"}) // test of a deprecated method
public void testRowComparator() {
sortedTable = TreeBasedTable.create();
assertSame(Ordering.natural(), sortedTable.rowComparator());
sortedTable = TreeBasedTable.create(Collections.reverseOrder(), Ordering.usingToString());
assertSame(Collections.reverseOrder(), sortedTable.rowComparator());
}
public void testColumnComparator() {
sortedTable = TreeBasedTable.create();
sortedTable.put("", 42, 'x');
assertSame(Ordering.natural(), sortedTable.columnComparator());
assertSame(
Ordering.natural(),
((SortedMap<Integer, Character>) sortedTable.rowMap().values().iterator().next())
.comparator());
sortedTable = TreeBasedTable.create(Collections.reverseOrder(), Ordering.usingToString());
sortedTable.put("", 42, 'x');
assertSame(Ordering.usingToString(), sortedTable.columnComparator());
assertSame(
Ordering.usingToString(),
((SortedMap<Integer, Character>) sortedTable.rowMap().values().iterator().next())
.comparator());
}
public void testRowKeySetComparator() {
sortedTable = TreeBasedTable.create();
assertSame(Ordering.natural(), sortedTable.rowKeySet().comparator());
sortedTable = TreeBasedTable.create(Collections.reverseOrder(), Ordering.usingToString());
assertSame(Collections.reverseOrder(), sortedTable.rowKeySet().comparator());
}
public void testRowKeySetFirst() {
sortedTable = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
assertSame("bar", sortedTable.rowKeySet().first());
}
public void testRowKeySetLast() {
sortedTable = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
assertSame("foo", sortedTable.rowKeySet().last());
}
public void testRowKeySetHeadSet() {
sortedTable = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
Set<String> set = sortedTable.rowKeySet().headSet("cat");
assertEquals(singleton("bar"), set);
set.clear();
assertTrue(set.isEmpty());
assertEquals(singleton("foo"), sortedTable.rowKeySet());
}
public void testRowKeySetTailSet() {
sortedTable = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
Set<String> set = sortedTable.rowKeySet().tailSet("cat");
assertEquals(singleton("foo"), set);
set.clear();
assertTrue(set.isEmpty());
assertEquals(singleton("bar"), sortedTable.rowKeySet());
}
public void testRowKeySetSubSet() {
sortedTable = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c', "dog", 2, 'd');
Set<String> set = sortedTable.rowKeySet().subSet("cat", "egg");
assertEquals(singleton("dog"), set);
set.clear();
assertTrue(set.isEmpty());
assertEquals(ImmutableSet.of("bar", "foo"), sortedTable.rowKeySet());
}
public void testRowMapComparator() {
sortedTable = TreeBasedTable.create();
assertSame(Ordering.natural(), sortedTable.rowMap().comparator());
sortedTable = TreeBasedTable.create(Collections.reverseOrder(), Ordering.usingToString());
assertSame(Collections.reverseOrder(), sortedTable.rowMap().comparator());
}
public void testRowMapFirstKey() {
sortedTable = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
assertSame("bar", sortedTable.rowMap().firstKey());
}
public void testRowMapLastKey() {
sortedTable = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
assertSame("foo", sortedTable.rowMap().lastKey());
}
public void testRowKeyMapHeadMap() {
sortedTable = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
Map<String, Map<Integer, Character>> map = sortedTable.rowMap().headMap("cat");
assertEquals(1, map.size());
assertEquals(ImmutableMap.of(1, 'b'), map.get("bar"));
map.clear();
assertTrue(map.isEmpty());
assertEquals(singleton("foo"), sortedTable.rowKeySet());
}
public void testRowKeyMapTailMap() {
sortedTable = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
Map<String, Map<Integer, Character>> map = sortedTable.rowMap().tailMap("cat");
assertEquals(1, map.size());
assertEquals(ImmutableMap.of(1, 'a', 3, 'c'), map.get("foo"));
map.clear();
assertTrue(map.isEmpty());
assertEquals(singleton("bar"), sortedTable.rowKeySet());
}
public void testRowKeyMapSubMap() {
sortedTable = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c', "dog", 2, 'd');
Map<String, Map<Integer, Character>> map = sortedTable.rowMap().subMap("cat", "egg");
assertEquals(ImmutableMap.of(2, 'd'), map.get("dog"));
map.clear();
assertTrue(map.isEmpty());
assertEquals(ImmutableSet.of("bar", "foo"), sortedTable.rowKeySet());
}
public void testRowMapValuesAreSorted() {
sortedTable = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c', "dog", 2, 'd');
assertTrue(sortedTable.rowMap().get("foo") instanceof SortedMap);
}
public void testColumnKeySet_isSorted() {
table =
create(
"a", 2, 'X', "a", 2, 'X', "b", 3, 'X', "b", 2, 'X', "c", 10, 'X', "c", 10, 'X', "c", 20,
'X', "d", 15, 'X', "d", 20, 'X', "d", 1, 'X', "e", 5, 'X');
assertEquals("[1, 2, 3, 5, 10, 15, 20]", table.columnKeySet().toString());
}
public void testColumnKeySet_isSortedWithRealComparator() {
table =
create(
String.CASE_INSENSITIVE_ORDER,
Ordering.<Integer>natural().reverse(),
"a",
2,
'X',
"a",
2,
'X',
"b",
3,
'X',
"b",
2,
'X',
"c",
10,
'X',
"c",
10,
'X',
"c",
20,
'X',
"d",
15,
'X',
"d",
20,
'X',
"d",
1,
'X',
"e",
5,
'X');
assertEquals("[20, 15, 10, 5, 3, 2, 1]", table.columnKeySet().toString());
}
public void testColumnKeySet_empty() {
table = create();
assertEquals("[]", table.columnKeySet().toString());
}
public void testColumnKeySet_oneRow() {
table = create("a", 2, 'X', "a", 1, 'X');
assertEquals("[1, 2]", table.columnKeySet().toString());
}
public void testColumnKeySet_oneColumn() {
table = create("a", 1, 'X', "b", 1, 'X');
assertEquals("[1]", table.columnKeySet().toString());
}
public void testColumnKeySet_oneEntry() {
table = create("a", 1, 'X');
assertEquals("[1]", table.columnKeySet().toString());
}
public void testRowEntrySetContains() {
table =
sortedTable =
create(
"a", 2, 'X', "a", 2, 'X', "b", 3, 'X', "b", 2, 'X', "c", 10, 'X', "c", 10, 'X', "c",
20, 'X', "d", 15, 'X', "d", 20, 'X', "d", 1, 'X', "e", 5, 'X');
SortedMap<Integer, Character> row = sortedTable.row("c");
Set<Entry<Integer, Character>> entrySet = row.entrySet();
assertTrue(entrySet.contains(immutableEntry(10, 'X')));
assertTrue(entrySet.contains(immutableEntry(20, 'X')));
assertFalse(entrySet.contains(immutableEntry(15, 'X')));
entrySet = row.tailMap(15).entrySet();
assertFalse(entrySet.contains(immutableEntry(10, 'X')));
assertTrue(entrySet.contains(immutableEntry(20, 'X')));
assertFalse(entrySet.contains(immutableEntry(15, 'X')));
}
public void testRowEntrySetRemove() {
  table =
      sortedTable =
          create(
              "a", 2, 'X', "a", 2, 'X', "b", 3, 'X', "b", 2, 'X', "c", 10, 'X', "c", 10, 'X',
              "c", 20, 'X', "d", 15, 'X', "d", 20, 'X', "d", 1, 'X', "e", 5, 'X');
  SortedMap<Integer, Character> rowC = sortedTable.row("c");

  // Removals through the tailMap(15) view touch only entries inside the view:
  // column 10 is outside it, column 20 is removed, column 15 never existed.
  Set<Entry<Integer, Character>> tailEntries = rowC.tailMap(15).entrySet();
  assertFalse(tailEntries.remove(immutableEntry(10, 'X')));
  assertTrue(tailEntries.remove(immutableEntry(20, 'X')));
  assertFalse(tailEntries.remove(immutableEntry(15, 'X')));

  // After the view removal, the full row only has (10, 'X') left to remove.
  Set<Entry<Integer, Character>> entries = rowC.entrySet();
  assertTrue(entries.remove(immutableEntry(10, 'X')));
  assertFalse(entries.remove(immutableEntry(20, 'X')));
  assertFalse(entries.remove(immutableEntry(15, 'X')));
}
public void testRowSize() {
  table =
      sortedTable =
          create(
              "a", 2, 'X', "a", 2, 'X', "b", 3, 'X', "b", 2, 'X', "c", 10, 'X', "c", 10, 'X',
              "c", 20, 'X', "d", 15, 'X', "d", 20, 'X', "d", 1, 'X', "e", 5, 'X');
  // Row "c" contains columns {10, 20}; only column 20 survives tailMap(15).
  SortedMap<Integer, Character> rowC = sortedTable.row("c");
  assertEquals(2, rowC.size());
  assertEquals(1, rowC.tailMap(15).size());
}
public void testSubRowClearAndPut() {
// Verifies that a row map and its tailMap(2) sub-view both track mutations made
// through the backing table, and that clearing the row view empties the table row.
table = create("foo", 1, 'a', "bar", 1, 'b', "foo", 3, 'c');
SortedMap<Integer, Character> row = (SortedMap<Integer, Character>) table.row("foo");
SortedMap<Integer, Character> subRow = row.tailMap(2);
assertEquals(ImmutableMap.of(1, 'a', 3, 'c'), row);
assertEquals(ImmutableMap.of(3, 'c'), subRow);
// Removing through the table is visible through both views.
table.remove("foo", 3);
assertEquals(ImmutableMap.of(1, 'a'), row);
assertEquals(ImmutableMap.of(), subRow);
table.remove("foo", 1);
assertEquals(ImmutableMap.of(), row);
assertEquals(ImmutableMap.of(), subRow);
// Inserting through the table is visible through both views.
table.put("foo", 2, 'b');
assertEquals(ImmutableMap.of(2, 'b'), row);
assertEquals(ImmutableMap.of(2, 'b'), subRow);
// Clearing the row view removes the row's data from the table as well.
row.clear();
assertEquals(ImmutableMap.of(), row);
assertEquals(ImmutableMap.of(), subRow);
table.put("foo", 5, 'x');
assertEquals(ImmutableMap.of(5, 'x'), row);
assertEquals(ImmutableMap.of(5, 'x'), subRow);
}
}
| TreeBasedTableTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/coordination/ElectionSchedulerFactory.java | {
"start": 2021,
"end": 7106
} | class ____ {
private static final Logger logger = LogManager.getLogger(ElectionSchedulerFactory.class);
private static final String ELECTION_INITIAL_TIMEOUT_SETTING_KEY = "cluster.election.initial_timeout";
private static final String ELECTION_BACK_OFF_TIME_SETTING_KEY = "cluster.election.back_off_time";
private static final String ELECTION_MAX_TIMEOUT_SETTING_KEY = "cluster.election.max_timeout";
private static final String ELECTION_DURATION_SETTING_KEY = "cluster.election.duration";
/*
* The first election is scheduled to occur a random number of milliseconds after the scheduler is started, where the random number of
* milliseconds is chosen uniformly from
*
* (0, min(ELECTION_INITIAL_TIMEOUT_SETTING, ELECTION_MAX_TIMEOUT_SETTING)]
*
* For `n > 1`, the `n`th election is scheduled to occur a random number of milliseconds after the `n - 1`th election, where the random
* number of milliseconds is chosen uniformly from
*
* (0, min(ELECTION_INITIAL_TIMEOUT_SETTING + (n-1) * ELECTION_BACK_OFF_TIME_SETTING, ELECTION_MAX_TIMEOUT_SETTING)]
*
* Each election lasts up to ELECTION_DURATION_SETTING.
*/
public static final Setting<TimeValue> ELECTION_INITIAL_TIMEOUT_SETTING = Setting.timeSetting(
ELECTION_INITIAL_TIMEOUT_SETTING_KEY,
TimeValue.timeValueMillis(100),
TimeValue.timeValueMillis(1),
TimeValue.timeValueSeconds(10),
Property.NodeScope
);
public static final Setting<TimeValue> ELECTION_BACK_OFF_TIME_SETTING = Setting.timeSetting(
ELECTION_BACK_OFF_TIME_SETTING_KEY,
TimeValue.timeValueMillis(100),
TimeValue.timeValueMillis(1),
TimeValue.timeValueSeconds(60),
Property.NodeScope
);
public static final Setting<TimeValue> ELECTION_MAX_TIMEOUT_SETTING = Setting.timeSetting(
ELECTION_MAX_TIMEOUT_SETTING_KEY,
TimeValue.timeValueSeconds(10),
TimeValue.timeValueMillis(200),
TimeValue.timeValueSeconds(600),
Property.NodeScope
);
public static final Setting<TimeValue> ELECTION_DURATION_SETTING = Setting.timeSetting(
ELECTION_DURATION_SETTING_KEY,
TimeValue.timeValueMillis(500),
TimeValue.timeValueMillis(1),
TimeValue.timeValueSeconds(600),
Property.NodeScope
);
private final TimeValue initialTimeout;
private final TimeValue backoffTime;
private final TimeValue maxTimeout;
private final TimeValue duration;
private final ThreadPool threadPool;
private final Executor clusterCoordinationExecutor;
private final Random random;
/**
 * Creates a factory configured from the {@code cluster.election.*} settings.
 * The injected {@link Random} makes the election jitter reproducible in tests.
 *
 * @throws IllegalArgumentException if the configured max timeout is smaller
 *                                  than the initial timeout
 */
public ElectionSchedulerFactory(Settings settings, Random random, ThreadPool threadPool) {
this.random = random;
this.threadPool = threadPool;
this.clusterCoordinationExecutor = threadPool.executor(Names.CLUSTER_COORDINATION);
initialTimeout = ELECTION_INITIAL_TIMEOUT_SETTING.get(settings);
backoffTime = ELECTION_BACK_OFF_TIME_SETTING.get(settings);
maxTimeout = ELECTION_MAX_TIMEOUT_SETTING.get(settings);
duration = ELECTION_DURATION_SETTING.get(settings);
// The max timeout caps the growing per-attempt timeout, so it must not be
// smaller than the initial timeout; fail fast on inconsistent settings.
if (maxTimeout.millis() < initialTimeout.millis()) {
throw new IllegalArgumentException(
format(
"[%s] is [%s], but must be at least [%s] which is [%s]",
ELECTION_MAX_TIMEOUT_SETTING_KEY,
maxTimeout,
ELECTION_INITIAL_TIMEOUT_SETTING_KEY,
initialTimeout
)
);
}
}
/**
 * Kicks off a series of repeated, randomly-timed election attempts.
 *
 * @param gracePeriod       initial delay before the very first attempt
 * @param scheduledRunnable invoked once per scheduled election attempt
 * @return a {@link Releasable} that stops further attempts when closed
 */
public Releasable startElectionScheduler(TimeValue gracePeriod, Runnable scheduledRunnable) {
    final ElectionScheduler electionScheduler = new ElectionScheduler();
    electionScheduler.scheduleNextElection(gracePeriod, scheduledRunnable);
    return electionScheduler;
}
@SuppressForbidden(reason = "Argument to Math.abs() is definitely not Long.MIN_VALUE")
// Maps any long to a non-negative value. Math.abs(Long.MIN_VALUE) would overflow
// and stay negative, so that single value is mapped to 0 explicitly.
private static long nonNegative(long n) {
return n == Long.MIN_VALUE ? 0 : Math.abs(n);
}
/**
 * @param randomNumber a randomly-chosen long
 * @param upperBound inclusive upper bound
 * @return a number in the range (0, upperBound]
 */
// package-private for testing
static long toPositiveLongAtMost(long randomNumber, long upperBound) {
assert 0 < upperBound : upperBound;
// Fold the full long range into [0, upperBound) via nonNegative + modulo,
// then shift by one so the result lies in (0, upperBound].
return nonNegative(randomNumber) % upperBound + 1;
}
@Override
public String toString() {
    // Summarises the timing configuration; 'duration' is intentionally omitted,
    // matching the established output format.
    return "ElectionSchedulerFactory{initialTimeout="
        + initialTimeout
        + ", backoffTime="
        + backoffTime
        + ", maxTimeout="
        + maxTimeout
        + '}';
}
private | ElectionSchedulerFactory |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/config/EndpointUriSetFromSpringTest.java | {
"start": 1242,
"end": 1676
// Verifies that an endpoint declared in the Spring XML context can be injected
// by its URI ("foo:bar") and that it reports that same URI back.
class ____ extends SpringRunWithTestSupport {
// Injected by Spring bean name, which here is the endpoint URI itself.
@Resource(name = "foo:bar")
MockEndpoint endpoint;
@Test
public void testEndpointCreatedWithCorrectUri() throws Exception {
assertNotNull(endpoint, "foo");
assertEquals("foo:bar", endpoint.getEndpointUri(), "foo.getEndpointUri()");
log.info("Found endpoint {} with URI: {}", endpoint, endpoint.getEndpointUri());
}
}
| EndpointUriSetFromSpringTest |
java | spring-projects__spring-boot | module/spring-boot-security/src/test/java/org/springframework/boot/security/autoconfigure/actuate/web/servlet/JerseyEndpointRequestIntegrationTests.java | {
"start": 1766,
"end": 5393
} | class ____ extends AbstractEndpointRequestIntegrationTests {
@Test
void toLinksWhenApplicationPathSetShouldMatch() {
getContextRunner().withPropertyValues("spring.jersey.application-path=/admin").run((context) -> {
WebTestClient webTestClient = getWebTestClient(context);
webTestClient.get()
.uri("/admin/actuator/")
.exchange()
.expectStatus()
.isEqualTo(expectedStatusWithTrailingSlash());
webTestClient.get().uri("/admin/actuator").exchange().expectStatus().isOk();
});
}
@Test
void toEndpointWhenApplicationPathSetShouldMatch() {
getContextRunner().withPropertyValues("spring.jersey.application-path=/admin").run((context) -> {
WebTestClient webTestClient = getWebTestClient(context);
webTestClient.get().uri("/admin/actuator/e1").exchange().expectStatus().isOk();
});
}
@Test
void toAnyEndpointWhenApplicationPathSetShouldMatch() {
getContextRunner()
.withPropertyValues("spring.jersey.application-path=/admin", "spring.security.user.password=password")
.run((context) -> {
WebTestClient webTestClient = getWebTestClient(context);
webTestClient.get().uri("/admin/actuator/e2").exchange().expectStatus().isUnauthorized();
webTestClient.get()
.uri("/admin/actuator/e2")
.header("Authorization", getBasicAuth())
.exchange()
.expectStatus()
.isOk();
});
}
@Test
void toAnyEndpointShouldMatchServletEndpoint() {
getContextRunner()
.withPropertyValues("spring.security.user.password=password",
"management.endpoints.web.exposure.include=se1")
.run((context) -> {
WebTestClient webTestClient = getWebTestClient(context);
webTestClient.get().uri("/actuator/se1").exchange().expectStatus().isUnauthorized();
webTestClient.get()
.uri("/actuator/se1")
.header("Authorization", getBasicAuth())
.exchange()
.expectStatus()
.isOk();
webTestClient.get().uri("/actuator/se1/list").exchange().expectStatus().isUnauthorized();
webTestClient.get()
.uri("/actuator/se1/list")
.header("Authorization", getBasicAuth())
.exchange()
.expectStatus()
.isOk();
});
}
@Test
void toAnyEndpointWhenApplicationPathSetShouldMatchServletEndpoint() {
getContextRunner()
.withPropertyValues("spring.jersey.application-path=/admin", "spring.security.user.password=password",
"management.endpoints.web.exposure.include=se1")
.run((context) -> {
WebTestClient webTestClient = getWebTestClient(context);
webTestClient.get().uri("/admin/actuator/se1").exchange().expectStatus().isUnauthorized();
webTestClient.get()
.uri("/admin/actuator/se1")
.header("Authorization", getBasicAuth())
.exchange()
.expectStatus()
.isOk();
webTestClient.get().uri("/admin/actuator/se1/list").exchange().expectStatus().isUnauthorized();
webTestClient.get()
.uri("/admin/actuator/se1/list")
.header("Authorization", getBasicAuth())
.exchange()
.expectStatus()
.isOk();
});
}
@Override
protected HttpStatus expectedStatusWithTrailingSlash() {
return HttpStatus.OK;
}
@Override
protected WebApplicationContextRunner createContextRunner() {
return new WebApplicationContextRunner(AnnotationConfigServletWebServerApplicationContext::new)
.withClassLoader(new FilteredClassLoader("org.springframework.web.servlet.DispatcherServlet"))
.withUserConfiguration(JerseyEndpointConfiguration.class)
.withConfiguration(AutoConfigurations.of(JerseyAutoConfiguration.class));
}
@Configuration
@EnableConfigurationProperties(WebEndpointProperties.class)
static | JerseyEndpointRequestIntegrationTests |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/factories/EngineFactory.java | {
"start": 795,
"end": 943
// Factory that assembles an Engine bean from its CrankShaft dependency.
class ____ {
// A single shared V8Engine instance is created and injected wherever an
// Engine is required.
@Singleton
Engine v8Engine(CrankShaft crankShaft) {
return new V8Engine(crankShaft);
}
}
// tag::class[]
| EngineFactory |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/SplitterMapTest.java | {
"start": 1081,
"end": 2145
} | class ____ extends ContextTestSupport {
@Test
public void testSplitMap() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:line");
mock.message(0).body().isEqualTo("Hello World");
mock.message(0).header("myKey").isEqualTo("123");
mock.message(1).body().isEqualTo("Bye World");
mock.message(1).header("myKey").isEqualTo("789");
Map<String, String> map = new LinkedHashMap<>();
map.put("123", "Hello World");
map.put("789", "Bye World");
template.sendBody("direct:start", map);
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.split(body())
.setHeader("myKey").simple("${body.key}")
.setBody(simple("${body.value}"))
.to("mock:line");
}
};
}
}
| SplitterMapTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_liqing.java | {
"start": 159,
"end": 634
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
    // The payload carries @type hints, so parsing it requires a config with
    // autotype support enabled; the test passes if no exception is thrown.
    ParserConfig parserConfig = new ParserConfig();
    parserConfig.setAutoTypeSupport(true);

    String payload = "{\"@type\":\"java.util.HashMap\",\"wcChangeAttr\":{\"@type\":\"com.alibaba.json.bvt.bug.Bug_for_liqing.TpFeedBackDO\",\"attributes\":{\"@type\":\"java.util.concurrent.ConcurrentHashMap\"},\"wcStatus\":102B}}";
    JSON.parse(payload, parserConfig);
}
public static | Bug_for_liqing |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/ColumnTransformersAnnotation.java | {
"start": 740,
"end": 1674
// Concrete annotation-usage implementation for @ColumnTransformers, the
// repeatable container of @ColumnTransformer annotations.
class ____ implements ColumnTransformers, RepeatableContainer<ColumnTransformer> {
private org.hibernate.annotations.ColumnTransformer[] value;
// Used in creating dynamic annotation instances (e.g. from XML); starts empty.
public ColumnTransformersAnnotation(ModelsContext modelContext) {
}
// Used in creating annotation instances from a JDK annotation instance.
public ColumnTransformersAnnotation(ColumnTransformers annotation, ModelsContext modelContext) {
this.value = extractJdkValue( annotation, HibernateAnnotations.COLUMN_TRANSFORMERS, "value", modelContext );
}
// Used in creating annotation instances from a map of attribute values.
public ColumnTransformersAnnotation(Map<String, Object> attributeValues, ModelsContext modelContext) {
this.value = (ColumnTransformer[]) attributeValues.get( "value" );
}
@Override
public Class<? extends Annotation> annotationType() {
return ColumnTransformers.class;
}
@Override
public org.hibernate.annotations.ColumnTransformer[] value() {
return value;
}
public void value(org.hibernate.annotations.ColumnTransformer[] value) {
this.value = value;
}
}
| ColumnTransformersAnnotation |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/savedrequest/DefaultSavedRequest.java | {
"start": 1483,
"end": 1765
} | class ____ used by
* {@link org.springframework.security.web.authentication.AbstractAuthenticationProcessingFilter}
* and {@link org.springframework.security.web.savedrequest.SavedRequestAwareWrapper} to
* reproduce the request after successful authentication. An instance of this | is |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java | {
"start": 783,
"end": 3203
/**
 * A StreamInput that reads from a caller-supplied byte array between an offset
 * and a limit. Instances are resettable so a single object can be reused across
 * many byte arrays.
 */
class ____ extends StreamInput {
private byte[] bytes;
// Index of the next byte to read.
private int pos;
// Exclusive upper bound of the readable region (offset + length).
private int limit;
public ByteArrayStreamInput() {
reset(BytesRef.EMPTY_BYTES);
}
public ByteArrayStreamInput(byte[] bytes) {
reset(bytes);
}
@Override
public String readString() throws IOException {
final int chars = readArraySize();
// Fast path: try decoding the string straight out of the backing array.
String string = tryReadStringFromBytes(bytes, pos, limit, chars);
if (string != null) {
return string;
}
// Fall back to the generic implementation when the fast path declines.
return doReadString(chars);
}
@Override
public int read() throws IOException {
// Per the InputStream contract: -1 at end of data, else the next byte
// as an unsigned value in [0, 255].
if (limit - pos <= 0) {
return -1;
}
return readByte() & 0xFF;
}
public void reset(byte[] bytes) {
reset(bytes, 0, bytes.length);
}
public int length() {
return limit;
}
public int getPosition() {
return pos;
}
public void setPosition(int pos) {
this.pos = pos;
}
// Re-points this stream at bytes[offset, offset + len) and rewinds it.
public void reset(byte[] bytes, int offset, int len) {
this.bytes = bytes;
pos = offset;
limit = offset + len;
}
// NOTE(review): unlike skip(), this does not clamp to the limit — callers
// appear to be expected to stay within bounds; confirm before relying on it
// near end-of-stream.
public void skipBytes(long count) {
pos += (int) count;
}
@Override
public long skip(long n) throws IOException {
if (n <= 0L) {
return 0L;
}
int available = available();
if (n < available) {
pos += (int) n;
return n;
}
// Skipping past the end clamps to the number of remaining bytes.
pos = limit;
return available;
}
@Override
public void close() {
// No-op
}
@Override
public int available() {
return limit - pos;
}
@Override
protected void ensureCanReadBytes(int length) throws EOFException {
final int available = limit - pos;
if (length > available) {
throwEOF(length, available);
}
}
@Override
public byte readByte() {
return bytes[pos++];
}
@Override
public void readBytes(byte[] b, int offset, int len) {
System.arraycopy(bytes, pos, b, offset, len);
pos += len;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
// Bulk read: -1 at end of data, otherwise up to len bytes are copied.
final int available = limit - pos;
if (available <= 0) {
return -1;
}
int toRead = Math.min(len, available);
readBytes(b, off, toRead);
return toRead;
}
}
| ByteArrayStreamInput |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java | {
"start": 5865,
"end": 29030
// Wires up the JAX-RS test environment: a MockRM whose admin ACL is restricted
// to the current user, the RMWebServices under test, and mocked request/response.
class ____ extends AbstractBinder {
private Configuration conf = new YarnConfiguration();
@Override
protected void configure() {
try {
userName = UserGroupInformation.getCurrentUser().getShortUserName();
} catch (IOException ioe) {
throw new RuntimeException("Unable to get current user name "
+ ioe.getMessage(), ioe);
}
// A user name guaranteed not to match the admin ACL, for negative auth tests.
notUserName = userName + "abc123";
conf = new YarnConfiguration();
// Only the current user is an admin, so requests as notUserName must be rejected.
conf.set(YarnConfiguration.YARN_ADMIN_ACL, userName);
rm = new MockRM(conf);
bind(rm).to(ResourceManager.class).named("rm");
bind(conf).to(Configuration.class).named("conf");
rmWebService = new RMWebServices(rm, conf);
bind(rmWebService).to(RMWebServices.class);
request = mock(HttpServletRequest.class);
bind(request).to(HttpServletRequest.class);
// The mocked principal controls which user the web services see.
Principal principal = () -> userName;
when(request.getUserPrincipal()).thenReturn(principal);
HttpServletResponse response = mock(HttpServletResponse.class);
bind(response).to(HttpServletResponse.class);
rmWebService.setResponse(response);
}
}
@Override
@BeforeEach
// Delegates to the base class, which builds the Jersey test container.
public void setUp() throws Exception {
super.setUp();
}
// All configuration happens in the binder/setUp; nothing to initialise here.
public TestRMWebServicesNodeLabels() {
}
private WebTarget getClusterWebResource() {
    // Register the custom entity readers, then navigate to /ws/v1/cluster.
    WebTarget target = targetWithJsonObject()
        .register(NodeLabelsInfoReader.class)
        .register(LabelsToNodesInfoReader.class)
        .register(NodeToLabelsInfoReader.class);
    return target.path(PATH_WS).path(PATH_V1).path(PATH_CLUSTER);
}
private Response get(String path) {
    // Simple GET under the cluster resource, authenticated via the user.name
    // query parameter.
    WebTarget target =
        getClusterWebResource().path(path).queryParam(QUERY_USER_NAME, userName);
    return target.request(MediaType.APPLICATION_JSON).get(Response.class);
}
private Response get(String path, MultivaluedMap<String, String> queryParams) {
    WebTarget target =
        getClusterWebResource().path(path).queryParam(QUERY_USER_NAME, userName);
    // WebTarget is immutable, so each queryParam call must re-assign the target.
    for (Map.Entry<String, List<String>> entry : queryParams.entrySet()) {
        for (String value : entry.getValue()) {
            target = target.queryParam(entry.getKey(), value);
        }
    }
    return target.request(MediaType.APPLICATION_JSON).get(Response.class);
}
private Response post(String path, String queryUserName, Object payload,
    Class<?> payloadClass) throws Exception {
    // POST the payload as JSON, authenticating via the user.name query parameter.
    WebTarget target =
        getClusterWebResource().path(path).queryParam(QUERY_USER_NAME, queryUserName);
    return target.request(MediaType.APPLICATION_JSON)
        .post(Entity.json(toJson(payload, payloadClass)), Response.class);
}
private Response post(String path, String queryUserName, Object payload,
    Class<?> payloadClass, MultivaluedMap<String, String> queryParams) throws Exception {
    WebTarget target =
        getClusterWebResource().path(path).queryParam(QUERY_USER_NAME, queryUserName);
    // WebTarget is immutable, so each queryParam call must re-assign the target.
    for (Map.Entry<String, List<String>> entry : queryParams.entrySet()) {
        for (String value : entry.getValue()) {
            target = target.queryParam(entry.getKey(), value);
        }
    }
    return target.request(MediaType.APPLICATION_JSON)
        .post(Entity.entity(toJson(payload, payloadClass),
            MediaType.APPLICATION_JSON), Response.class);
}
// End-to-end walk through the node-label REST API: add/remove cluster labels,
// assign labels to nodes, exercise both query styles, verify authorization
// failures for a non-admin user, and finally exercise the same operations with
// distributed (non-centralized) node-label configuration, where node-level
// replacements must be rejected.
@Test
public void testNodeLabels() throws Exception {
Response response;
// Add a label
response = addNodeLabels(Lists.newArrayList(Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY)));
assertHttp200(response);
// Verify
response = getNodeLabels();
assertApplicationJsonUtf8Response(response);
assertNodeLabelsInfo(response.readEntity(NodeLabelsInfo.class), Lists.newArrayList(
Pair.of(LABEL_A, true)));
// Add another
response = addNodeLabels(Lists.newArrayList(Pair.of(LABEL_B, false)));
assertHttp200(response);
// Verify
response = getNodeLabels();
assertApplicationJsonUtf8Response(response);
// Verify exclusivity for 'b' as false
assertNodeLabelsInfo(response.readEntity(NodeLabelsInfo.class),
Lists.newArrayList(
Pair.of(LABEL_A, true),
Pair.of(LABEL_B, false)));
// Add labels to a node
response = replaceLabelsOnNode(NODE_0, LABEL_A);
assertHttp200(response);
// Add labels to another node
response = replaceLabelsOnNode(NODE_1, LABEL_B);
assertHttp200(response);
// Add labels to another node
response = replaceLabelsOnNode(NODE_2, LABEL_B);
assertHttp200(response);
// Verify all, using get-labels-to-Nodes
response = getNodeLabelMappings();
assertApplicationJsonUtf8Response(response);
LabelsToNodesInfo labelsToNodesInfo = response.readEntity(LabelsToNodesInfo.class);
assertLabelsToNodesInfo(labelsToNodesInfo, 2, Lists.newArrayList(
Pair.of(Pair.of(LABEL_B, false), Lists.newArrayList(NODE_1, NODE_2)),
Pair.of(Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY), Lists.newArrayList(NODE_0))
));
// Verify, using get-labels-to-Nodes for specified set of labels
response = getNodeLabelMappingsByLabels(LABEL_A);
assertApplicationJsonUtf8Response(response);
labelsToNodesInfo = response.readEntity(LabelsToNodesInfo.class);
assertLabelsToNodesInfo(labelsToNodesInfo, 1, Lists.newArrayList(
Pair.of(Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY), Lists.newArrayList(NODE_0))
));
// Verify
response = getLabelsOfNode(NODE_0);
assertApplicationJsonUtf8Response(response);
assertNodeLabelsInfoContains(response.readEntity(NodeLabelsInfo.class),
Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY));
// Replace
response = replaceLabelsOnNode(NODE_0, LABEL_B);
assertHttp200(response);
// Verify
response = getLabelsOfNode(NODE_0);
assertApplicationJsonUtf8Response(response);
assertNodeLabelsInfoContains(response.readEntity(NodeLabelsInfo.class),
Pair.of(LABEL_B, false));
// Replace labels using node-to-labels
response = replaceNodeToLabels(Lists.newArrayList(Pair.of(NODE_0,
Lists.newArrayList(LABEL_A))));
assertHttp200(response);
// Verify, using node-to-labels
response = getNodeToLabels();
assertApplicationJsonUtf8Response(response);
NodeToLabelsInfo nodeToLabelsInfo = response.readEntity(NodeToLabelsInfo.class);
NodeLabelsInfo nodeLabelsInfo = nodeToLabelsInfo.getNodeToLabels().get(NODE_0);
assertNodeLabelsSize(nodeLabelsInfo, 1);
assertNodeLabelsInfoContains(nodeLabelsInfo, Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY));
// Remove all
response = replaceLabelsOnNode(NODE_0, "");
assertHttp200(response);
// Verify
response = getLabelsOfNode(NODE_0);
assertApplicationJsonUtf8Response(response);
assertNodeLabelsSize(response.readEntity(NodeLabelsInfo.class), 0);
// Add a label back for auth tests
response = replaceLabelsOnNode(NODE_0, LABEL_A);
assertHttp200(response);
// Verify
response = getLabelsOfNode(NODE_0);
assertApplicationJsonUtf8Response(response);
assertNodeLabelsInfoContains(response.readEntity(NodeLabelsInfo.class),
Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY));
// Auth fail replace labels on node
Principal principal2 = () -> notUserName;
when(request.getUserPrincipal()).thenReturn(principal2);
response = replaceLabelsOnNodeWithUserName(NODE_0, notUserName, LABEL_B);
assertHttp401(response);
// Verify
response = getLabelsOfNode(NODE_0);
assertApplicationJsonUtf8Response(response);
assertNodeLabelsInfoContains(response.readEntity(NodeLabelsInfo.class),
Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY));
// Fail to add a label with wrong user
response = addNodeLabelsWithUser(Lists.newArrayList(Pair.of("c", DEFAULT_NL_EXCLUSIVITY)),
notUserName);
assertHttp401(response);
// Verify
response = getNodeLabels();
assertApplicationJsonUtf8Response(response);
assertNodeLabelsSize(response.readEntity(NodeLabelsInfo.class), 2);
// Remove cluster label (succeed, we no longer need it)
Principal principal3 = () -> userName;
when(request.getUserPrincipal()).thenReturn(principal3);
response = removeNodeLabel(LABEL_B);
assertHttp200(response);
// Verify
response = getNodeLabels();
assertApplicationJsonUtf8Response(response);
assertNodeLabelsInfo(response.readEntity(NodeLabelsInfo.class),
Lists.newArrayList(Pair.of(LABEL_A, true)));
// Remove cluster label with post
response = removeNodeLabel(LABEL_A);
assertHttp200(response);
// Verify
response = getNodeLabels();
assertApplicationJsonUtf8Response(response);
nodeLabelsInfo = response.readEntity(NodeLabelsInfo.class);
assertEquals(0, nodeLabelsInfo.getNodeLabels().size());
// Following test cases are to test replace when distributed node label
// configuration is on
// Reset for testing : add cluster labels
response = addNodeLabels(Lists.newArrayList(
Pair.of(LABEL_X, false), Pair.of(LABEL_Y, false)));
assertHttp200(response);
// Reset for testing : Add labels to a node
response = replaceLabelsOnNode(NODE_0, LABEL_Y);
assertHttp200(response);
//setting rmWebService for non-centralized NodeLabel Configuration
rmWebService.isCentralizedNodeLabelConfiguration = false;
// Case1 : Replace labels using node-to-labels
response = replaceNodeToLabels(Lists.newArrayList(Pair.of(NODE_0,
Lists.newArrayList(LABEL_X))));
assertHttp404(response);
// Verify, using node-to-labels that previous operation has failed
response = getNodeToLabels();
assertApplicationJsonUtf8Response(response);
nodeToLabelsInfo = response.readEntity(NodeToLabelsInfo.class);
nodeLabelsInfo = nodeToLabelsInfo.getNodeToLabels().get(NODE_0);
assertNodeLabelsSize(nodeLabelsInfo, 1);
assertNodeLabelsInfoDoesNotContain(nodeLabelsInfo, Pair.of(LABEL_X, false));
// Case2 : failure to Replace labels using replace-labels
response = replaceLabelsOnNode(NODE_0, LABEL_X);
assertHttp404(response);
// Verify, using node-to-labels that previous operation has failed
response = getNodeToLabels();
assertApplicationJsonUtf8Response(response);
nodeToLabelsInfo = response.readEntity(NodeToLabelsInfo.class);
nodeLabelsInfo = nodeToLabelsInfo.getNodeToLabels().get(NODE_0);
assertNodeLabelsSize(nodeLabelsInfo, 1);
assertNodeLabelsInfoDoesNotContain(nodeLabelsInfo, Pair.of(LABEL_X, false));
// Case3 : Remove cluster label should be successful
response = removeNodeLabel(LABEL_X);
assertHttp200(response);
// Verify
response = getNodeLabels();
assertApplicationJsonUtf8Response(response);
assertNodeLabelsInfoAtPosition(response.readEntity(NodeLabelsInfo.class), Pair.of(LABEL_Y,
false), 0);
// Remove y
response = removeNodeLabel(LABEL_Y);
assertHttp200(response);
// Verify
response = getNodeLabels();
assertApplicationJsonUtf8Response(response);
assertNodeLabelsSize(response.readEntity(NodeLabelsInfo.class), 0);
// add a new nodelabel with exclusivity=false
response = addNodeLabels(Lists.newArrayList(Pair.of(LABEL_Z, false)));
assertHttp200(response);
// Verify
response = getNodeLabels();
assertApplicationJsonUtf8Response(response);
assertNodeLabelsInfoAtPosition(response.readEntity(NodeLabelsInfo.class),
Pair.of(LABEL_Z, false), 0);
// NOTE(review): this asserts on the stale 'nodeLabelsInfo' captured from the
// node-to-labels query in Case2 above (which already had size 1), NOT on the
// response fetched just now, so it passes trivially — confirm the intent was
// to assert on the freshly read NodeLabelsInfo instead.
assertNodeLabelsSize(nodeLabelsInfo, 1);
}
// Asserts that the labels-to-nodes mapping has the expected size and that, for
// each expected (label name, exclusivity) pair, every listed node ID is present
// in the actual mapping for that label.
private void assertLabelsToNodesInfo(LabelsToNodesInfo labelsToNodesInfo, int size,
List<Pair<Pair<String, Boolean>, List<String>>> nodeLabelsToNodesList) {
Map<NodeLabelInfo, NodeIDsInfo> labelsToNodes = labelsToNodesInfo.getLabelsToNodes();
assertNotNull(labelsToNodes, "Labels to nodes mapping should not be null.");
assertEquals(size, labelsToNodes.size(), "Size of label to nodes mapping is not the expected.");
for (Pair<Pair<String, Boolean>, List<String>> nodeLabelToNodes : nodeLabelsToNodesList) {
Pair<String, Boolean> expectedNLData = nodeLabelToNodes.getLeft();
List<String> expectedNodes = nodeLabelToNodes.getRight();
NodeLabelInfo expectedNLInfo = new NodeLabelInfo(expectedNLData.getLeft(),
expectedNLData.getRight());
NodeIDsInfo actualNodes = labelsToNodes.get(expectedNLInfo);
assertNotNull(actualNodes, String.format("Node info not found. Expected NodeLabel data: %s",
expectedNLData));
for (String expectedNode : expectedNodes) {
assertTrue(actualNodes.getNodeIDs().contains(expectedNode),
String.format("Can't find node ID in actual Node IDs list: %s",
actualNodes.getNodeIDs()));
}
}
}
private void assertNodeLabelsInfo(NodeLabelsInfo nodeLabelsInfo,
    List<Pair<String, Boolean>> nlInfos) {
    // Sizes must match first; then each (name, exclusivity) pair is compared
    // positionally against the actual list.
    assertEquals(nlInfos.size(), nodeLabelsInfo.getNodeLabels().size());
    for (int idx = 0; idx < nodeLabelsInfo.getNodeLabelsInfo().size(); idx++) {
        Pair<String, Boolean> expectedInfo = nlInfos.get(idx);
        NodeLabelInfo actualInfo = nodeLabelsInfo.getNodeLabelsInfo().get(idx);
        LOG.debug("Checking NodeLabelInfo: {}", actualInfo);
        assertEquals(expectedInfo.getLeft(), actualInfo.getName());
        assertEquals(expectedInfo.getRight(), actualInfo.getExclusivity());
    }
}
private void assertNodeLabelsInfoAtPosition(NodeLabelsInfo nodeLabelsInfo, Pair<String,
    Boolean> nlInfo, int pos) {
    // Compare the expected (name, exclusivity) pair against the entry at 'pos'.
    NodeLabelInfo found = nodeLabelsInfo.getNodeLabelsInfo().get(pos);
    LOG.debug("Checking NodeLabelInfo: {}", found);
    assertEquals(nlInfo.getLeft(), found.getName());
    assertEquals(nlInfo.getRight(), found.getExclusivity());
}
private void assertNodeLabelsInfoContains(NodeLabelsInfo nodeLabelsInfo,
    Pair<String, Boolean> nlInfo) {
    // Membership check relies on NodeLabelInfo's equals(name, exclusivity).
    NodeLabelInfo expected = new NodeLabelInfo(nlInfo.getLeft(), nlInfo.getRight());
    String message = String.format("Cannot find nodeLabelInfo '%s' among items of node label info list:" +
        " %s", expected, nodeLabelsInfo.getNodeLabelsInfo());
    assertTrue(nodeLabelsInfo.getNodeLabelsInfo().contains(expected), message);
}
private void assertNodeLabelsInfoDoesNotContain(NodeLabelsInfo nodeLabelsInfo, Pair<String,
    Boolean> nlInfo) {
    // Negative membership check, mirror image of assertNodeLabelsInfoContains.
    NodeLabelInfo unexpected = new NodeLabelInfo(nlInfo.getLeft(), nlInfo.getRight());
    String message = String.format("Should have not found nodeLabelInfo '%s' among " +
        "items of node label info list: %s", unexpected, nodeLabelsInfo.getNodeLabelsInfo());
    assertFalse(nodeLabelsInfo.getNodeLabelsInfo().contains(unexpected), message);
}
// Asserts the number of node labels carried by the given NodeLabelsInfo.
private void assertNodeLabelsSize(NodeLabelsInfo nodeLabelsInfo, int expectedSize) {
assertEquals(expectedSize, nodeLabelsInfo.getNodeLabelsInfo().size());
}
private Response replaceNodeToLabels(List<Pair<String, List<String>>> nodeToLabelInfos)
    throws Exception {
    // Convert each (node, labels) pair into a NodeToLabelsEntry and POST the batch.
    NodeToLabelsEntryList entryList = new NodeToLabelsEntryList();
    for (Pair<String, List<String>> nodeToLabelInfo : nodeToLabelInfos) {
        ArrayList<String> labels = new ArrayList<>(nodeToLabelInfo.getRight());
        entryList.getNodeToLabels().add(new NodeToLabelsEntry(nodeToLabelInfo.getLeft(), labels));
    }
    return post(PATH_REPLACE_NODE_TO_LABELS, userName, entryList, NodeToLabelsEntryList.class);
}
// GET the full labels-to-nodes mapping, without any label filter.
private Response getNodeLabelMappings() {
return get(PATH_LABEL_MAPPINGS);
}
// GET the labels-to-nodes mapping filtered to the given label names.
private Response getNodeLabelMappingsByLabels(String... labelNames) {
// Parameterized instead of the raw MultivaluedMap: the raw type silently
// disabled compile-time checking of the query-parameter values.
MultivaluedMap<String, String> params = createMultiValuedMap(labelNames);
return get(PATH_LABEL_MAPPINGS, params);
}
// Convenience overload that replaces labels as the current (admin) user.
private Response replaceLabelsOnNode(String node, String... labelNames) throws Exception {
return replaceLabelsOnNodeWithUserName(node, userName, labelNames);
}
// Replaces the labels on a node via POST /nodes/{node}/replace-labels, acting
// as the given user (used to exercise both the allowed and the denied paths).
private Response replaceLabelsOnNodeWithUserName(String node,
String userName, String... labelNames) throws Exception {
LOG.info("Replacing labels on node '{}', label(s): {}", node, labelNames);
// Parameterized instead of the raw MultivaluedMap: the raw type silently
// disabled compile-time checking of the query-parameter values.
MultivaluedMap<String, String> params = createMultiValuedMap(labelNames);
String path = UriBuilder.fromPath(PATH_NODES).path(node)
.path(PATH_REPLACE_LABELS).build().toString();
return post(path, userName, null, null, params);
}
// Builds the "labels" multi-valued query parameter from the given label names.
// Returns a parameterized map instead of the raw MultivaluedMap (raw types
// defeat compile-time checking); existing raw-typed callers remain compatible.
private static MultivaluedMap<String, String> createMultiValuedMap(String[] labelNames) {
MultivaluedMap<String, String> params = new MultivaluedHashMap<>();
for (String labelName : labelNames) {
params.add("labels", labelName);
}
return params;
}
// Removes cluster node labels via POST, passing the names as "labels" params.
private Response removeNodeLabel(String... labelNames) throws Exception {
MultivaluedMap params = createMultiValuedMap(labelNames);
return post(PATH_REMOVE_LABELS, userName, null, null, params);
}
private Response getLabelsOfNode(String node) {
    // GET /nodes/{node}/get-labels
    String labelsPath = UriBuilder.fromPath(PATH_NODES).path(node)
        .path(PATH_GET_LABELS).build().toString();
    return get(labelsPath);
}
// GET the set of cluster-level node labels.
private Response getNodeLabels() {
return get(PATH_GET_NODE_LABELS);
}
// GET the node-to-labels mapping for all nodes.
private Response getNodeToLabels() {
return get(PATH_GET_NODE_TO_LABELS);
}
// Adds cluster node labels as the current (admin) user.
private Response addNodeLabels(List<Pair<String, Boolean>> nlInfos) throws Exception {
return addNodeLabelsInternal(nlInfos, userName);
}
// Adds cluster node labels as an arbitrary user (used for authorization tests).
private Response addNodeLabelsWithUser(List<Pair<String, Boolean>> nlInfos,
String userName) throws Exception {
return addNodeLabelsInternal(nlInfos, userName);
}
private Response addNodeLabelsInternal(List<Pair<String, Boolean>> nlInfos,
    String userName) throws Exception {
    // Build the NodeLabelsInfo payload from the (name, exclusivity) pairs and
    // POST it to the add-node-labels endpoint as the given user.
    NodeLabelsInfo payload = new NodeLabelsInfo();
    for (Pair<String, Boolean> info : nlInfos) {
        payload.getNodeLabelsInfo().add(new NodeLabelInfo(info.getLeft(), info.getRight()));
    }
    return post(PATH_ADD_NODE_LABELS, userName, payload, NodeLabelsInfo.class);
}
private void assertApplicationJsonUtf8Response(Response response) {
assertEquals(MediaType.APPLICATION_JSON_TYPE + ";" + JettyUtils.UTF_8,
response.getMediaType().toString());
}
private void assertHttp200(Response response) {
assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
}
private void assertHttp401(Response response) {
assertEquals(Response.Status.UNAUTHORIZED.getStatusCode(), response.getStatus());
}
private void assertHttp404(Response response) {
assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus());
}
@Test
public void testLabelInvalidAddition()
throws Exception {
// Add a invalid label
Response response = addNodeLabels(Lists.newArrayList(Pair.of("a&",
DEFAULT_NL_EXCLUSIVITY)));
String expectedMessage =
"label name should only contains"
+ " {0-9, a-z, A-Z, -, _} and should not started with"
+ " {-,_}, now it is= a&";
validateJsonExceptionContent(response, expectedMessage);
}
@Test
public void testLabelChangeExclusivity()
throws Exception {
Response response;
response = addNodeLabels(Lists.newArrayList(Pair.of("newLabel", DEFAULT_NL_EXCLUSIVITY)));
assertHttp200(response);
// new info and change exclusivity
response = addNodeLabels(Lists.newArrayList(Pair.of("newLabel", false)));
String expectedMessage =
"Exclusivity cannot be modified for an existing"
+ " label with : <newLabel:exclusivity=false>";
validateJsonExceptionContent(response, expectedMessage);
}
private void validateJsonExceptionContent(Response response,
String expectedMessage)
throws JSONException {
assertEquals(BAD_REQUEST_CODE, response.getStatus());
JSONObject msg = response.readEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
String message = exception.getString("message");
assertEquals(3, exception.length(), "incorrect number of elements");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
WebServicesTestUtils.checkStringContains("exception message",
expectedMessage, message);
}
@Test
public void testLabelInvalidReplace()
throws Exception {
Response response;
// replace label which doesn't exist
response = replaceLabelsOnNode(NODE_0, "idontexist");
String expectedMessage =
"Not all labels being replaced contained by known label"
+ " collections, please check, new labels=[idontexist]";
validateJsonExceptionContent(response, expectedMessage);
}
@Test
public void testLabelInvalidRemove()
throws Exception {
Response response;
response = removeNodeLabel("ireallydontexist");
String expectedMessage =
"Node label=ireallydontexist to be"
+ " removed doesn't existed in cluster node labels"
+ " collection.";
validateJsonExceptionContent(response, expectedMessage);
}
@Test
public void testNodeLabelPartitionInfo() throws Exception {
Response response;
// Add a node label
response = addNodeLabels(Lists.newArrayList(Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY)));
assertHttp200(response);
// Verify partition info in get-node-labels
response = getNodeLabels();
assertApplicationJsonUtf8Response(response);
NodeLabelsInfo nodeLabelsInfo = response.readEntity(NodeLabelsInfo.class);
assertNodeLabelsSize(nodeLabelsInfo, 1);
for (NodeLabelInfo nl : nodeLabelsInfo.getNodeLabelsInfo()) {
assertEquals(LABEL_A, nl.getName());
assertTrue(nl.getExclusivity());
assertNotNull(nl.getPartitionInfo());
assertNotNull(nl.getPartitionInfo().getResourceAvailable());
}
// Add node label to a node
response = replaceLabelsOnNode("nodeId:0", LABEL_A);
assertHttp200(response);
// Verify partition info in label-mappings
response = getNodeLabelMappings();
assertApplicationJsonUtf8Response(response);
LabelsToNodesInfo labelsToNodesInfo = response.readEntity(LabelsToNodesInfo.class);
assertLabelsToNodesInfo(labelsToNodesInfo, 1, Lists.newArrayList(
Pair.of(Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY), Lists.newArrayList("nodeId:0"))
));
NodeIDsInfo nodes = labelsToNodesInfo.getLabelsToNodes().get(new NodeLabelInfo(LABEL_A));
assertNotNull(nodes.getPartitionInfo());
assertNotNull(nodes.getPartitionInfo().getResourceAvailable());
}
@SuppressWarnings("rawtypes")
private String toJson(Object obj, Class klass) throws Exception {
if (obj == null) {
return null;
}
JettisonJaxbContext jettisonJaxbContext = new JettisonJaxbContext(klass);
JettisonMarshaller jsonMarshaller = jettisonJaxbContext.createJsonMarshaller();
StringWriter stringWriter = new StringWriter();
jsonMarshaller.marshallToJSON(obj, stringWriter);
return stringWriter.toString();
}
}
| JerseyBinder |
java | apache__rocketmq | broker/src/main/java/org/apache/rocketmq/broker/pop/PopConsumerLockService.java | {
"start": 1339,
"end": 3114
} | class ____ {
private static final Logger log = LoggerFactory.getLogger(LoggerName.ROCKETMQ_POP_LOGGER_NAME);
private final long timeout;
private final ConcurrentMap<String /* groupId@topicId */, TimedLock> lockTable;
public PopConsumerLockService(long timeout) {
this.timeout = timeout;
this.lockTable = new ConcurrentHashMap<>();
}
public boolean tryLock(String groupId, String topicId) {
return Objects.requireNonNull(ConcurrentHashMapUtils.computeIfAbsent(lockTable,
groupId + PopAckConstants.SPLIT + topicId, s -> new TimedLock())).tryLock();
}
public void unlock(String groupId, String topicId) {
TimedLock lock = lockTable.get(groupId + PopAckConstants.SPLIT + topicId);
if (lock != null) {
lock.unlock();
}
}
// For retry topics, should lock origin group and topic
public boolean isLockTimeout(String groupId, String topicId) {
topicId = KeyBuilder.parseNormalTopic(topicId, groupId);
TimedLock lock = lockTable.get(groupId + PopAckConstants.SPLIT + topicId);
return lock == null || System.currentTimeMillis() - lock.getLockTime() > timeout;
}
public void removeTimeout() {
Iterator<Map.Entry<String, TimedLock>> iterator = lockTable.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, TimedLock> entry = iterator.next();
if (System.currentTimeMillis() - entry.getValue().getLockTime() > timeout) {
log.info("PopConsumerLockService remove timeout lock, " +
"key={}, locked={}", entry.getKey(), entry.getValue().lock.get());
iterator.remove();
}
}
}
static | PopConsumerLockService |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/query/HqlOrderExpressionVisitorUnitTests.java | {
"start": 1907,
"end": 11991
} | class ____ {
@PersistenceContext EntityManager em;
@Test
void genericFunctions() {
assertThat(renderOrderBy(JpaSort.unsafe("LENGTH(firstname)"), "var_1"))
.startsWithIgnoringCase("order by character_length(var_1.firstname) asc");
assertThat(renderOrderBy(JpaSort.unsafe("char_length(firstname)"), "var_1"))
.startsWithIgnoringCase("order by char_length(var_1.firstname) asc");
assertThat(renderOrderBy(JpaSort.unsafe("nlssort(firstname, 'NLS_SORT = XGERMAN_DIN_AI')"), "var_1"))
.startsWithIgnoringCase("order by nlssort(var_1.firstname, 'NLS_SORT = XGERMAN_DIN_AI')");
}
@Test // GH-3172
void cast() {
assertThatExceptionOfType(UnsupportedOperationException.class)
.isThrownBy(() -> renderOrderBy(JpaSort.unsafe("cast(emailAddress as date)"), "var_1"));
}
@Test // GH-3172
void extract() {
assertThat(renderOrderBy(JpaSort.unsafe("EXTRACT(DAY FROM createdAt)"), "var_1"))
.startsWithIgnoringCase("order by extract(day from var_1.createdAt)");
assertThat(renderOrderBy(JpaSort.unsafe("WEEK(createdAt)"), "var_1"))
.startsWithIgnoringCase("order by extract(week from var_1.createdAt)");
}
@Test // GH-3172
void trunc() {
assertThat(renderOrderBy(JpaSort.unsafe("TRUNC(age)"), "var_1"))
.startsWithIgnoringCase("order by trunc(var_1.age)");
}
@Test // GH-3172
void upperLower() {
assertThat(renderOrderBy(JpaSort.unsafe("upper(firstname)"), "var_1"))
.startsWithIgnoringCase("order by upper(var_1.firstname)");
assertThat(renderOrderBy(JpaSort.unsafe("lower(firstname)"), "var_1"))
.startsWithIgnoringCase("order by lower(var_1.firstname)");
}
@Test // GH-3172
void substring() {
assertThat(renderOrderBy(JpaSort.unsafe("substring(emailAddress, 0, 3)"), "var_1"))
.startsWithIgnoringCase("order by substring(var_1.emailAddress, 0, 3) asc");
assertThat(renderOrderBy(JpaSort.unsafe("substring(emailAddress, 0)"), "var_1"))
.startsWithIgnoringCase("order by substring(var_1.emailAddress, 0) asc");
}
@Test // GH-3172
void repeat() {
assertThat(renderOrderBy(JpaSort.unsafe("repeat('a', 5)"), "var_1"))
.startsWithIgnoringCase("order by repeat('a', 5) asc");
}
@Test // GH-3172
void literals() {
assertThat(renderOrderBy(JpaSort.unsafe("age + 1"), "var_1")).startsWithIgnoringCase("order by var_1.age + 1");
assertThat(renderOrderBy(JpaSort.unsafe("age + 1l"), "var_1")).startsWithIgnoringCase("order by var_1.age + 1");
assertThat(renderOrderBy(JpaSort.unsafe("age + 1L"), "var_1")).startsWithIgnoringCase("order by var_1.age + 1");
assertThat(renderOrderBy(JpaSort.unsafe("age + 1.1"), "var_1")).startsWithIgnoringCase("order by var_1.age + 1.1");
assertThat(renderOrderBy(JpaSort.unsafe("age + 1.1f"), "var_1")).startsWithIgnoringCase("order by var_1.age + 1.1");
assertThat(renderOrderBy(JpaSort.unsafe("age + 1.1bi"), "var_1"))
.startsWithIgnoringCase("order by var_1.age + 1.1");
assertThat(renderOrderBy(JpaSort.unsafe("age + 1.1bd"), "var_1"))
.startsWithIgnoringCase("order by var_1.age + 1.1");
assertThat(renderOrderBy(JpaSort.unsafe("age + 0x12"), "var_1")).startsWithIgnoringCase("order by var_1.age + 18");
}
@Test // GH-3172
void temporalLiterals() {
// JDBC
assertThat(renderOrderBy(JpaSort.unsafe("createdAt + {ts '2024-01-01 12:34:56'}"), "var_1"))
.startsWithIgnoringCase("order by var_1.createdAt + '2024-01-01T12:34:56'");
assertThat(renderOrderBy(JpaSort.unsafe("createdAt + {ts '2012-01-03 09:00:00.000000001'}"), "var_1"))
.startsWithIgnoringCase("order by var_1.createdAt + '2012-01-03T09:00:00.000000001'");
// Hibernate NPE
assertThatIllegalArgumentException()
.isThrownBy(() -> renderOrderBy(JpaSort.unsafe("createdAt + {t '12:34:56'}"), "var_1"));
assertThat(renderOrderBy(JpaSort.unsafe("createdAt + {d '2024-01-01'}"), "var_1"))
.startsWithIgnoringCase("order by var_1.createdAt + '2024-01-01'");
// JPQL
assertThat(renderOrderBy(JpaSort.unsafe("createdAt + {ts 2024-01-01 12:34:56}"), "var_1"))
.startsWithIgnoringCase("order by var_1.createdAt + '2024-01-01T12:34:56'");
assertThat(renderOrderBy(JpaSort.unsafe("createdAt + {t 12:34:56}"), "var_1"))
.startsWithIgnoringCase("order by var_1.createdAt + '12:34:56'");
assertThat(renderOrderBy(JpaSort.unsafe("createdAt + {d 2024-01-01}"), "var_1"))
.startsWithIgnoringCase("order by var_1.createdAt + '2024-01-01'");
}
@Test // GH-3172
void arithmetic() {
// Hibernate representation bugs, should be sum(var_1.age)
assertThat(renderOrderBy(JpaSort.unsafe("sum(age)"), "var_1")).startsWithIgnoringCase("order by sum()");
assertThat(renderOrderBy(JpaSort.unsafe("min(age)"), "var_1")).startsWithIgnoringCase("order by min()");
assertThat(renderOrderBy(JpaSort.unsafe("max(age)"), "var_1")).startsWithIgnoringCase("order by max()");
assertThat(renderOrderBy(JpaSort.unsafe("age"), "var_1")).startsWithIgnoringCase("order by var_1.age");
assertThat(renderOrderBy(JpaSort.unsafe("age + 1"), "var_1")).startsWithIgnoringCase("order by var_1.age + 1");
assertThat(renderOrderBy(JpaSort.unsafe("ABS(age) + 1"), "var_1"))
.startsWithIgnoringCase("order by abs(var_1.age) + 1");
assertThat(renderOrderBy(JpaSort.unsafe("neg(active)"), "var_1"))
.startsWithIgnoringCase("order by neg(var_1.active)");
assertThat(renderOrderBy(JpaSort.unsafe("abs(age)"), "var_1")).startsWithIgnoringCase("order by abs(var_1.age)");
assertThat(renderOrderBy(JpaSort.unsafe("ceiling(age)"), "var_1"))
.startsWithIgnoringCase("order by ceiling(var_1.age)");
assertThat(renderOrderBy(JpaSort.unsafe("floor(age)"), "var_1"))
.startsWithIgnoringCase("order by floor(var_1.age)");
assertThat(renderOrderBy(JpaSort.unsafe("round(age)"), "var_1"))
.startsWithIgnoringCase("order by round(var_1.age)");
assertThat(renderOrderBy(JpaSort.unsafe("prod(age, 1)"), "var_1"))
.startsWithIgnoringCase("order by prod(var_1.age, 1)");
assertThat(renderOrderBy(JpaSort.unsafe("prod(age, age)"), "var_1"))
.startsWithIgnoringCase("order by prod(var_1.age, var_1.age)");
assertThat(renderOrderBy(JpaSort.unsafe("diff(age, 1)"), "var_1"))
.startsWithIgnoringCase("order by diff(var_1.age, 1)");
assertThat(renderOrderBy(JpaSort.unsafe("quot(age, 1)"), "var_1"))
.startsWithIgnoringCase("order by quot(var_1.age, 1)");
assertThat(renderOrderBy(JpaSort.unsafe("mod(age, 1)"), "var_1"))
.startsWithIgnoringCase("order by mod(var_1.age, 1)");
assertThat(renderOrderBy(JpaSort.unsafe("sqrt(age)"), "var_1")).startsWithIgnoringCase("order by sqrt(var_1.age)");
assertThat(renderOrderBy(JpaSort.unsafe("exp(age)"), "var_1")).startsWithIgnoringCase("order by exp(var_1.age)");
assertThat(renderOrderBy(JpaSort.unsafe("ln(age)"), "var_1")).startsWithIgnoringCase("order by ln(var_1.age)");
}
@Test // GH-3172
@Disabled("HHH-19075")
void trim() {
assertThat(renderOrderBy(JpaSort.unsafe("trim(leading '.' from lastname)"), "var_1"))
.startsWithIgnoringCase("order by repeat('a', 5) asc");
}
@Test // GH-3172
void groupedExpression() {
assertThat(renderOrderBy(JpaSort.unsafe("(lastname)"), "var_1")).startsWithIgnoringCase("order by var_1.lastname");
}
@Test // GH-3172
void tupleExpression() {
assertThat(renderOrderBy(JpaSort.unsafe("(firstname, lastname)"), "var_1"))
.startsWithIgnoringCase("order by var_1.firstname, var_1.lastname");
}
@Test // GH-3172
void concat() {
assertThat(renderOrderBy(JpaSort.unsafe("firstname || lastname"), "var_1"))
.startsWithIgnoringCase("order by concat(var_1.firstname, var_1.lastname)");
}
@Test // GH-3172
void pathBased() {
String query = renderQuery(JpaSort.unsafe("manager.firstname"), "var_1");
assertThat(query).contains("from org.springframework.data.jpa.domain.sample.User var_1 left join var_1.manager");
assertThat(query).contains(".firstname asc nulls last");
}
@Test // GH-3172
void caseSwitch() {
assertThat(renderOrderBy(JpaSort.unsafe("case firstname when 'Oliver' then 'A' else firstname end"), "var_1"))
.startsWithIgnoringCase("order by case var_1.firstname when 'Oliver' then 'A' else var_1.firstname end");
assertThat(renderOrderBy(
JpaSort.unsafe("case firstname when 'Oliver' then 'A' when 'Joachim' then 'z' else firstname end"), "var_1"))
.startsWithIgnoringCase(
"order by case var_1.firstname when 'Oliver' then 'A' when 'Joachim' then 'z' else var_1.firstname end");
assertThat(renderOrderBy(JpaSort.unsafe("case when age < 31 then 'A' else firstname end"), "var_1"))
.startsWithIgnoringCase("order by case when var_1.age < 31 then 'A' else var_1.firstname end");
assertThat(
renderOrderBy(JpaSort.unsafe("case when firstname not in ('Oliver', 'Dave') then 'A' else firstname end"),
"var_1"))
.startsWithIgnoringCase(
"order by case when var_1.firstname not in ('Oliver', 'Dave') then 'A' else var_1.firstname end");
}
private String renderOrderBy(JpaSort sort, String alias) {
String query = renderQuery(sort, alias);
String lowerCase = query.toLowerCase(Locale.ROOT);
int index = lowerCase.indexOf("order by");
if (index != -1) {
return query.substring(index);
}
return "";
}
CriteriaQuery<User> createQuery(JpaSort sort, String alias) {
CriteriaQuery<User> query = em.getCriteriaBuilder().createQuery(User.class);
Selection<User> from = query.from(User.class).alias(alias);
HqlOrderExpressionVisitor extractor = new HqlOrderExpressionVisitor(em.getCriteriaBuilder(), (Path<?>) from,
QueryUtils::toExpressionRecursively);
Expression<?> expression = extractor.createCriteriaExpression(sort.stream().findFirst().get());
return query.select(from).orderBy(em.getCriteriaBuilder().asc(expression, Nulls.NONE));
}
@SuppressWarnings("rawtypes")
String renderQuery(JpaSort sort, String alias) {
CriteriaQuery<User> q = createQuery(sort, alias);
SqmSelectStatement s = (SqmSelectStatement) q;
StringBuilder builder = new StringBuilder();
s.appendHqlString(builder, SqmRenderContext.simpleContext());
return builder.toString();
}
}
| HqlOrderExpressionVisitorUnitTests |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/join/LookupJoinHarnessTest.java | {
"start": 2823,
"end": 11492
} | class ____ {
private final TypeSerializer<RowData> inSerializer =
new RowDataSerializer(
DataTypes.INT().getLogicalType(), DataTypes.STRING().getLogicalType());
private final RowDataHarnessAssertor assertor =
new RowDataHarnessAssertor(
new LogicalType[] {
DataTypes.INT().getLogicalType(),
DataTypes.STRING().getLogicalType(),
DataTypes.INT().getLogicalType(),
DataTypes.STRING().getLogicalType()
});
@Test
void testTemporalInnerJoin() throws Exception {
OneInputStreamOperatorTestHarness<RowData, RowData> testHarness =
createHarness(JoinType.INNER_JOIN, FilterOnTable.WITHOUT_FILTER);
testHarness.open();
testHarness.processElement(insertRecord(1, "a"));
testHarness.processElement(insertRecord(2, "b"));
testHarness.processElement(insertRecord(3, "c"));
testHarness.processElement(insertRecord(4, "d"));
testHarness.processElement(insertRecord(5, "e"));
testHarness.processElement(insertRecord(6, null));
List<Object> expectedOutput = new ArrayList<>();
expectedOutput.add(insertRecord(1, "a", 1, "Julian"));
expectedOutput.add(insertRecord(3, "c", 3, "Jark"));
expectedOutput.add(insertRecord(3, "c", 3, "Jackson"));
expectedOutput.add(insertRecord(4, "d", 4, "Fabian"));
assertor.assertOutputEquals("output wrong.", expectedOutput, testHarness.getOutput());
testHarness.close();
}
@Test
void testTemporalInnerJoinWithFilter() throws Exception {
OneInputStreamOperatorTestHarness<RowData, RowData> testHarness =
createHarness(JoinType.INNER_JOIN, FilterOnTable.WITH_FILTER);
testHarness.open();
testHarness.processElement(insertRecord(1, "a"));
testHarness.processElement(insertRecord(2, "b"));
testHarness.processElement(insertRecord(3, "c"));
testHarness.processElement(insertRecord(4, "d"));
testHarness.processElement(insertRecord(5, "e"));
List<Object> expectedOutput = new ArrayList<>();
expectedOutput.add(insertRecord(1, "a", 1, "Julian"));
expectedOutput.add(insertRecord(3, "c", 3, "Jackson"));
expectedOutput.add(insertRecord(4, "d", 4, "Fabian"));
assertor.assertOutputEquals("output wrong.", expectedOutput, testHarness.getOutput());
testHarness.close();
}
@Test
void testTemporalLeftJoin() throws Exception {
OneInputStreamOperatorTestHarness<RowData, RowData> testHarness =
createHarness(JoinType.LEFT_JOIN, FilterOnTable.WITHOUT_FILTER);
testHarness.open();
testHarness.processElement(insertRecord(1, "a"));
testHarness.processElement(insertRecord(2, "b"));
testHarness.processElement(insertRecord(3, "c"));
testHarness.processElement(insertRecord(4, "d"));
testHarness.processElement(insertRecord(5, "e"));
testHarness.processElement(insertRecord(6, null));
List<Object> expectedOutput = new ArrayList<>();
expectedOutput.add(insertRecord(1, "a", 1, "Julian"));
expectedOutput.add(insertRecord(2, "b", null, null));
expectedOutput.add(insertRecord(3, "c", 3, "Jark"));
expectedOutput.add(insertRecord(3, "c", 3, "Jackson"));
expectedOutput.add(insertRecord(4, "d", 4, "Fabian"));
expectedOutput.add(insertRecord(5, "e", null, null));
expectedOutput.add(insertRecord(6, null, null, null));
assertor.assertOutputEquals("output wrong.", expectedOutput, testHarness.getOutput());
testHarness.close();
}
@Test
void testTemporalLeftJoinWithFilter() throws Exception {
OneInputStreamOperatorTestHarness<RowData, RowData> testHarness =
createHarness(JoinType.LEFT_JOIN, FilterOnTable.WITH_FILTER);
testHarness.open();
testHarness.processElement(insertRecord(1, "a"));
testHarness.processElement(insertRecord(2, "b"));
testHarness.processElement(insertRecord(3, "c"));
testHarness.processElement(insertRecord(4, "d"));
testHarness.processElement(insertRecord(5, "e"));
testHarness.processElement(insertRecord(6, null));
List<Object> expectedOutput = new ArrayList<>();
expectedOutput.add(insertRecord(1, "a", 1, "Julian"));
expectedOutput.add(insertRecord(2, "b", null, null));
expectedOutput.add(insertRecord(3, "c", 3, "Jackson"));
expectedOutput.add(insertRecord(4, "d", 4, "Fabian"));
expectedOutput.add(insertRecord(5, "e", null, null));
expectedOutput.add(insertRecord(6, null, null, null));
assertor.assertOutputEquals("output wrong.", expectedOutput, testHarness.getOutput());
testHarness.close();
}
@Test
void testTemporalLeftJoinWithPreFilter() throws Exception {
ProcessFunction<RowData, RowData> joinRunner =
new LookupJoinRunner(
new GeneratedFunctionWrapper<>(new TestingFetcherFunction()),
new GeneratedCollectorWrapper<>(new TestingFetcherCollector()),
// test the pre-filter via constructor
new GeneratedFilterCondition("", "", new Object[0]) {
@Override
public FilterCondition newInstance(ClassLoader classLoader) {
return new TestingPreFilterCondition();
}
},
true,
2);
ProcessOperator<RowData, RowData> operator = new ProcessOperator<>(joinRunner);
OneInputStreamOperatorTestHarness<RowData, RowData> testHarness =
new OneInputStreamOperatorTestHarness<>(operator, inSerializer);
testHarness.open();
testHarness.processElement(insertRecord(1, "a"));
testHarness.processElement(insertRecord(2, "b"));
testHarness.processElement(insertRecord(3, "c"));
testHarness.processElement(insertRecord(4, "d"));
testHarness.processElement(insertRecord(5, "e"));
testHarness.processElement(insertRecord(6, null));
List<Object> expectedOutput = new ArrayList<>();
expectedOutput.add(insertRecord(1, "a", 1, "Julian"));
expectedOutput.add(insertRecord(2, "b", null, null));
expectedOutput.add(insertRecord(3, "c", 3, "Jark"));
expectedOutput.add(insertRecord(3, "c", 3, "Jackson"));
expectedOutput.add(insertRecord(4, "d", 4, "Fabian"));
expectedOutput.add(insertRecord(5, "e", null, null));
expectedOutput.add(insertRecord(6, null, null, null));
assertor.assertOutputEquals("output wrong.", expectedOutput, testHarness.getOutput());
testHarness.close();
}
// ---------------------------------------------------------------------------------
@SuppressWarnings("unchecked")
private OneInputStreamOperatorTestHarness<RowData, RowData> createHarness(
JoinType joinType, FilterOnTable filterOnTable) throws Exception {
boolean isLeftJoin = joinType == JoinType.LEFT_JOIN;
ProcessFunction<RowData, RowData> joinRunner;
if (filterOnTable == FilterOnTable.WITHOUT_FILTER) {
joinRunner =
new LookupJoinRunner(
new GeneratedFunctionWrapper<>(new TestingFetcherFunction()),
new GeneratedCollectorWrapper<>(new TestingFetcherCollector()),
new GeneratedFunctionWrapper<>(new TestingPreFilterCondition()),
isLeftJoin,
2);
} else {
joinRunner =
new LookupJoinWithCalcRunner(
new GeneratedFunctionWrapper<>(new TestingFetcherFunction()),
new GeneratedFunctionWrapper<>(new CalculateOnTemporalTable()),
new GeneratedCollectorWrapper<>(new TestingFetcherCollector()),
new GeneratedFunctionWrapper<>(new TestingPreFilterCondition()),
isLeftJoin,
2);
}
ProcessOperator<RowData, RowData> operator = new ProcessOperator<>(joinRunner);
return new OneInputStreamOperatorTestHarness<>(operator, inSerializer);
}
/** Whether this is a inner join or left join. */
private | LookupJoinHarnessTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/type/JavaTypeTest.java | {
"start": 776,
"end": 869
} | enum ____ {
A(1), B(2);
private MyEnum2(int value) { }
}
static | MyEnum2 |
java | elastic__elasticsearch | modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java | {
"start": 1769,
"end": 7397
} | class ____ extends AbstractQueryTestCase<ParentIdQueryBuilder> {
private static final String TYPE = "_doc";
private static final String JOIN_FIELD_NAME = "join_field";
private static final String PARENT_NAME = "parent";
private static final String CHILD_NAME = "child";
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return Arrays.asList(ParentJoinPlugin.class);
}
@Override
protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
XContentBuilder mapping = jsonBuilder().startObject()
.startObject("_doc")
.startObject("properties")
.startObject("join_field")
.field("type", "join")
.startObject("relations")
.field("parent", "child")
.endObject()
.endObject()
.startObject(TEXT_FIELD_NAME)
.field("type", "text")
.endObject()
.startObject(KEYWORD_FIELD_NAME)
.field("type", "keyword")
.endObject()
.startObject(INT_FIELD_NAME)
.field("type", "integer")
.endObject()
.startObject(DOUBLE_FIELD_NAME)
.field("type", "double")
.endObject()
.startObject(BOOLEAN_FIELD_NAME)
.field("type", "boolean")
.endObject()
.startObject(DATE_FIELD_NAME)
.field("type", "date")
.endObject()
.startObject(OBJECT_FIELD_NAME)
.field("type", "object")
.endObject()
.endObject()
.endObject()
.endObject();
mapperService.merge(TYPE, new CompressedXContent(Strings.toString(mapping)), MapperService.MergeReason.MAPPING_UPDATE);
}
@Override
protected ParentIdQueryBuilder doCreateTestQueryBuilder() {
return new ParentIdQueryBuilder(CHILD_NAME, randomAlphaOfLength(4)).ignoreUnmapped(randomBoolean());
}
@Override
protected void doAssertLuceneQuery(ParentIdQueryBuilder queryBuilder, Query query, SearchExecutionContext context) {
assertThat(query, Matchers.instanceOf(BooleanQuery.class));
BooleanQuery booleanQuery = (BooleanQuery) query;
assertThat(booleanQuery.clauses().size(), Matchers.equalTo(2));
BooleanQuery expected = new BooleanQuery.Builder().add(
new TermQuery(new Term(JOIN_FIELD_NAME + "#" + PARENT_NAME, queryBuilder.getId())),
BooleanClause.Occur.MUST
).add(new TermQuery(new Term(JOIN_FIELD_NAME, queryBuilder.getType())), BooleanClause.Occur.FILTER).build();
assertThat(expected, equalTo(query));
}
public void testFromJson() throws IOException {
String query = """
{
"parent_id" : {
"type" : "child",
"id" : "123",
"ignore_unmapped" : true,
"boost" : 3.0,
"_name" : "name" }
}""";
ParentIdQueryBuilder queryBuilder = (ParentIdQueryBuilder) parseQuery(query);
checkGeneratedJson(query, queryBuilder);
assertThat(queryBuilder.getType(), Matchers.equalTo("child"));
assertThat(queryBuilder.getId(), Matchers.equalTo("123"));
assertThat(queryBuilder.boost(), Matchers.equalTo(3f));
assertThat(queryBuilder.queryName(), Matchers.equalTo("name"));
}
public void testDefaultsRemoved() throws IOException {
String query = """
{
"parent_id" : {
"type" : "child",
"id" : "123",
"ignore_unmapped" : false,
"boost" : 1.0
}
}""";
checkGeneratedJson("""
{
"parent_id" : {
"type" : "child",
"id" : "123"
}
}""", parseQuery(query));
}
public void testIgnoreUnmapped() throws IOException {
final ParentIdQueryBuilder queryBuilder = new ParentIdQueryBuilder("unmapped", "foo");
queryBuilder.ignoreUnmapped(true);
Query query = queryBuilder.toQuery(createSearchExecutionContext());
assertThat(query, notNullValue());
assertThat(query, instanceOf(MatchNoDocsQuery.class));
final ParentIdQueryBuilder failingQueryBuilder = new ParentIdQueryBuilder("unmapped", "foo");
failingQueryBuilder.ignoreUnmapped(false);
QueryShardException e = expectThrows(QueryShardException.class, () -> failingQueryBuilder.toQuery(createSearchExecutionContext()));
assertThat(e.getMessage(), containsString("[" + ParentIdQueryBuilder.NAME + "] no relation found for child [unmapped]"));
}
public void testThrowsOnNullTypeOrId() {
expectThrows(IllegalArgumentException.class, () -> new ParentIdQueryBuilder(null, randomAlphaOfLength(5)));
expectThrows(IllegalArgumentException.class, () -> new ParentIdQueryBuilder(randomAlphaOfLength(5), null));
}
public void testDisallowExpensiveQueries() {
SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class);
when(searchExecutionContext.allowExpensiveQueries()).thenReturn(false);
ParentIdQueryBuilder queryBuilder = doCreateTestQueryBuilder();
ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> queryBuilder.toQuery(searchExecutionContext));
assertEquals("[joining] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", e.getMessage());
}
}
| ParentIdQueryBuilderTests |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/service/registry/ImportHttpServiceRegistrar.java | {
"start": 1121,
"end": 2239
} | class ____ extends AbstractHttpServiceRegistrar {
@Override
protected void registerHttpServices(GroupRegistry registry, AnnotationMetadata metadata) {
MergedAnnotation<?> groupsAnnot = metadata.getAnnotations().get(ImportHttpServices.Container.class);
if (groupsAnnot.isPresent()) {
for (MergedAnnotation<?> annot : groupsAnnot.getAnnotationArray("value", ImportHttpServices.class)) {
processImportAnnotation(annot, registry);
}
}
metadata.getAnnotations().stream(ImportHttpServices.class)
.forEach(annot -> processImportAnnotation(annot, registry));
}
private void processImportAnnotation(MergedAnnotation<?> annotation, GroupRegistry groupRegistry) {
String groupName = annotation.getString("group");
HttpServiceGroup.ClientType clientType = annotation.getEnum("clientType", HttpServiceGroup.ClientType.class);
groupRegistry.forGroup(groupName, clientType)
.register(annotation.getClassArray("types"))
.detectInBasePackages(annotation.getStringArray("basePackages"))
.detectInBasePackages(annotation.getClassArray("basePackageClasses"));
}
}
| ImportHttpServiceRegistrar |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/DispatcherServices.java | {
"start": 1816,
"end": 9423
} | class ____ {
private final Configuration configuration;
private final HighAvailabilityServices highAvailabilityServices;
private final GatewayRetriever<ResourceManagerGateway> resourceManagerGatewayRetriever;
private final BlobServer blobServer;
private final HeartbeatServices heartbeatServices;
private final JobManagerMetricGroup jobManagerMetricGroup;
private final ExecutionGraphInfoStore executionGraphInfoStore;
private final FatalErrorHandler fatalErrorHandler;
private final HistoryServerArchivist historyServerArchivist;
@Nullable private final String metricQueryServiceAddress;
private final DispatcherOperationCaches operationCaches;
private final ExecutionPlanWriter executionPlanWriter;
private final JobResultStore jobResultStore;
private final JobManagerRunnerFactory jobManagerRunnerFactory;
private final CleanupRunnerFactory cleanupRunnerFactory;
private final Executor ioExecutor;
private final Collection<FailureEnricher> failureEnrichers;
DispatcherServices(
Configuration configuration,
HighAvailabilityServices highAvailabilityServices,
GatewayRetriever<ResourceManagerGateway> resourceManagerGatewayRetriever,
BlobServer blobServer,
HeartbeatServices heartbeatServices,
ExecutionGraphInfoStore executionGraphInfoStore,
FatalErrorHandler fatalErrorHandler,
HistoryServerArchivist historyServerArchivist,
@Nullable String metricQueryServiceAddress,
DispatcherOperationCaches operationCaches,
JobManagerMetricGroup jobManagerMetricGroup,
ExecutionPlanWriter planWriter,
JobResultStore jobResultStore,
JobManagerRunnerFactory jobManagerRunnerFactory,
CleanupRunnerFactory cleanupRunnerFactory,
Executor ioExecutor,
Collection<FailureEnricher> failureEnrichers) {
this.configuration = Preconditions.checkNotNull(configuration, "Configuration");
this.highAvailabilityServices =
Preconditions.checkNotNull(highAvailabilityServices, "HighAvailabilityServices");
this.resourceManagerGatewayRetriever =
Preconditions.checkNotNull(
resourceManagerGatewayRetriever, "ResourceManagerGatewayRetriever");
this.blobServer = Preconditions.checkNotNull(blobServer, "BlobServer");
this.heartbeatServices = Preconditions.checkNotNull(heartbeatServices, "HeartBeatServices");
this.executionGraphInfoStore =
Preconditions.checkNotNull(executionGraphInfoStore, "ExecutionGraphInfoStore");
this.fatalErrorHandler = Preconditions.checkNotNull(fatalErrorHandler, "FatalErrorHandler");
this.historyServerArchivist =
Preconditions.checkNotNull(historyServerArchivist, "HistoryServerArchivist");
this.metricQueryServiceAddress = metricQueryServiceAddress;
this.operationCaches = Preconditions.checkNotNull(operationCaches, "OperationCaches");
this.jobManagerMetricGroup =
Preconditions.checkNotNull(jobManagerMetricGroup, "JobManagerMetricGroup");
this.executionPlanWriter = Preconditions.checkNotNull(planWriter, "ExecutionPlanWriter");
this.jobResultStore = Preconditions.checkNotNull(jobResultStore, "JobResultStore");
this.jobManagerRunnerFactory =
Preconditions.checkNotNull(jobManagerRunnerFactory, "JobManagerRunnerFactory");
this.cleanupRunnerFactory =
Preconditions.checkNotNull(cleanupRunnerFactory, "CleanupRunnerFactory");
this.ioExecutor = Preconditions.checkNotNull(ioExecutor, "IOExecutor");
this.failureEnrichers = Preconditions.checkNotNull(failureEnrichers, "FailureEnrichers");
}
public Configuration getConfiguration() {
return configuration;
}
public HighAvailabilityServices getHighAvailabilityServices() {
return highAvailabilityServices;
}
public GatewayRetriever<ResourceManagerGateway> getResourceManagerGatewayRetriever() {
return resourceManagerGatewayRetriever;
}
public BlobServer getBlobServer() {
return blobServer;
}
public HeartbeatServices getHeartbeatServices() {
return heartbeatServices;
}
public JobManagerMetricGroup getJobManagerMetricGroup() {
return jobManagerMetricGroup;
}
public ExecutionGraphInfoStore getArchivedExecutionGraphStore() {
return executionGraphInfoStore;
}
public FatalErrorHandler getFatalErrorHandler() {
return fatalErrorHandler;
}
public HistoryServerArchivist getHistoryServerArchivist() {
return historyServerArchivist;
}
@Nullable
public String getMetricQueryServiceAddress() {
return metricQueryServiceAddress;
}
public DispatcherOperationCaches getOperationCaches() {
return operationCaches;
}
public ExecutionPlanWriter getExecutionPlanWriter() {
return executionPlanWriter;
}
public JobResultStore getJobResultStore() {
return jobResultStore;
}
JobManagerRunnerFactory getJobManagerRunnerFactory() {
return jobManagerRunnerFactory;
}
CleanupRunnerFactory getCleanupRunnerFactory() {
return cleanupRunnerFactory;
}
public Executor getIoExecutor() {
return ioExecutor;
}
public Collection<FailureEnricher> getFailureEnrichers() {
return failureEnrichers;
}
public static DispatcherServices from(
PartialDispatcherServicesWithJobPersistenceComponents
partialDispatcherServicesWithJobPersistenceComponents,
JobManagerRunnerFactory jobManagerRunnerFactory,
CleanupRunnerFactory cleanupRunnerFactory) {
return new DispatcherServices(
partialDispatcherServicesWithJobPersistenceComponents.getConfiguration(),
partialDispatcherServicesWithJobPersistenceComponents.getHighAvailabilityServices(),
partialDispatcherServicesWithJobPersistenceComponents
.getResourceManagerGatewayRetriever(),
partialDispatcherServicesWithJobPersistenceComponents.getBlobServer(),
partialDispatcherServicesWithJobPersistenceComponents.getHeartbeatServices(),
partialDispatcherServicesWithJobPersistenceComponents
.getArchivedExecutionGraphStore(),
partialDispatcherServicesWithJobPersistenceComponents.getFatalErrorHandler(),
partialDispatcherServicesWithJobPersistenceComponents.getHistoryServerArchivist(),
partialDispatcherServicesWithJobPersistenceComponents
.getMetricQueryServiceAddress(),
partialDispatcherServicesWithJobPersistenceComponents.getOperationCaches(),
partialDispatcherServicesWithJobPersistenceComponents
.getJobManagerMetricGroupFactory()
.create(),
partialDispatcherServicesWithJobPersistenceComponents.getExecutionPlanWriter(),
partialDispatcherServicesWithJobPersistenceComponents.getJobResultStore(),
jobManagerRunnerFactory,
cleanupRunnerFactory,
partialDispatcherServicesWithJobPersistenceComponents.getIoExecutor(),
partialDispatcherServicesWithJobPersistenceComponents.getFailureEnrichers());
}
}
| DispatcherServices |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/stubbing/answers/DeepStubReturnsEnumJava11Test.java | {
"start": 1784,
"end": 2109
} | enum ____ {
A {
@Override
String getValue() {
return this.name();
}
},
B {
@Override
String getValue() {
return this.name();
}
},
;
abstract String getValue();
}
}
| TestEnum |
java | apache__camel | components/camel-dropbox/src/test/java/org/apache/camel/component/dropbox/integration/DropboxTestSupport.java | {
"start": 1453,
"end": 4517
} | class ____ extends CamelTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(DropboxTestSupport.class);
protected final Properties properties;
protected String workdir;
protected String token;
protected String apiKey;
protected String apiSecret;
protected String refreshToken;
protected Long expireIn;
private final DbxClientV2 client;
protected DropboxTestSupport() {
properties = loadProperties();
workdir = properties.getProperty("workDir");
token = properties.getProperty("accessToken");
refreshToken = properties.getProperty("refreshToken");
apiKey = properties.getProperty("apiKey");
apiSecret = properties.getProperty("apiSecret");
expireIn = Long.valueOf(properties.getProperty("expireIn"));
DbxRequestConfig config = DbxRequestConfig.newBuilder(properties.getProperty("clientIdentifier")).build();
DbxCredential credential = new DbxCredential(token, expireIn, refreshToken, apiKey, apiSecret);
client = new DbxClientV2(config, credential);
}
private static Properties loadProperties() {
return TestSupport.loadExternalPropertiesQuietly(DropboxTestSupport.class, "/test-options.properties");
}
// Used by JUnit to automatically trigger the integration tests
@SuppressWarnings("unused")
private static boolean hasCredentials() throws IOException {
Properties properties = loadProperties();
return !properties.getProperty("accessToken", "").isEmpty();
}
@BeforeEach
public void setUpWorkingFolder() throws DbxException {
createDir(workdir);
}
protected void createDir(String name) throws DbxException {
try {
removeDir(name);
} finally {
client.files().createFolderV2(name);
}
}
protected void removeDir(String name) throws DbxException {
client.files().deleteV2(name);
}
protected void createFile(String fileName, String content) throws IOException {
try {
client.files().uploadBuilder(workdir + "/" + fileName)
.uploadAndFinish(new ByteArrayInputStream(content.getBytes()));
//wait some time for synchronization
Thread.sleep(1000);
} catch (DbxException e) {
LOG.info("folder is already created");
} catch (InterruptedException e) {
LOG.debug("Waiting for synchronization interrupted.");
}
}
protected String getFileContent(String path) throws DbxException, IOException {
try (ByteArrayOutputStream target = new ByteArrayOutputStream();
DbxDownloader<FileMetadata> downloadedFile = client.files().download(path)) {
if (downloadedFile != null) {
downloadedFile.download(target);
}
return target.toString();
}
}
@Override
protected Properties useOverridePropertiesWithPropertiesComponent() {
return properties;
}
}
| DropboxTestSupport |
java | google__dagger | javatests/dagger/internal/codegen/MissingBindingValidationTest.java | {
"start": 85334,
"end": 85523
} | interface ____ {}");
Source qux =
CompilerTests.javaSource( // force one-string-per-line format
"test.Qux",
"package test;",
"",
" | Bar |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequest.java | {
"start": 681,
"end": 864
} | class ____ for the creation of an API key. The request requires a name to be provided
* and optionally an expiration time and permission limitation can be provided.
*/
public final | used |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UserRoleMapper.java | {
"start": 5917,
"end": 6617
} | class ____ the given string into a DN and return its normalized format.
* If the input string is not a valid DN, {@code null} is returned.
* The DN parsing and normalization are cached internally so that the same
* input string will only be processed once (as long as the cache entry is not GC'd).
* The cache works regardless of whether the input string is a valid DN.
*
* The cache uses {@link SoftReference} for its values so that they free for GC.
* This is to prevent potential memory pressure when there are many concurrent role
* mapping processes coupled with large number of groups and role mappings, which
* in theory is unbounded.
*/
| parse |
java | apache__flink | flink-kubernetes/src/test/java/org/apache/flink/kubernetes/kubeclient/decorators/ExternalServiceDecoratorTest.java | {
"start": 1624,
"end": 6138
} | class ____ extends KubernetesJobManagerTestBase {
private ExternalServiceDecorator externalServiceDecorator;
private Map<String, String> customizedAnnotations =
Map.of(
"annotation1", "annotation-value1",
"annotation2", "annotation-value2");
private Map<String, String> customizedLabels =
Map.of(
"label1", "label-value1",
"label2", "label-value2");
@Override
protected void onSetup() throws Exception {
super.onSetup();
this.flinkConfig.set(
KubernetesConfigOptions.REST_SERVICE_ANNOTATIONS, customizedAnnotations);
this.flinkConfig.set(KubernetesConfigOptions.REST_SERVICE_LABELS, customizedLabels);
this.externalServiceDecorator =
new ExternalServiceDecorator(this.kubernetesJobManagerParameters);
}
@Test
void testBuildAccompanyingKubernetesResources() throws IOException {
final List<HasMetadata> resources =
this.externalServiceDecorator.buildAccompanyingKubernetesResources();
assertThat(resources).hasSize(1);
final Service restService = (Service) resources.get(0);
assertThat(restService.getApiVersion()).isEqualTo(Constants.API_VERSION);
assertThat(restService.getMetadata().getName())
.isEqualTo(ExternalServiceDecorator.getExternalServiceName(CLUSTER_ID));
final Map<String, String> expectedLabels = getCommonLabels();
expectedLabels.putAll(customizedLabels);
assertThat(restService.getMetadata().getLabels()).isEqualTo(expectedLabels);
assertThat(restService.getSpec().getType())
.isEqualTo(KubernetesConfigOptions.ServiceExposedType.ClusterIP.name());
final List<ServicePort> expectedServicePorts =
Collections.singletonList(
new ServicePortBuilder()
.withName(Constants.REST_PORT_NAME)
.withPort(REST_PORT)
.withNewTargetPort(Integer.valueOf(REST_BIND_PORT))
.build());
assertThat(restService.getSpec().getPorts()).isEqualTo(expectedServicePorts);
final Map<String, String> expectedSelectors = getCommonLabels();
expectedSelectors.put(Constants.LABEL_COMPONENT_KEY, Constants.LABEL_COMPONENT_JOB_MANAGER);
assertThat(restService.getSpec().getSelector()).isEqualTo(expectedSelectors);
final Map<String, String> resultAnnotations = restService.getMetadata().getAnnotations();
assertThat(resultAnnotations).isEqualTo(customizedAnnotations);
}
@Test
void testSetServiceExposedType() throws IOException {
this.flinkConfig.set(
KubernetesConfigOptions.REST_SERVICE_EXPOSED_TYPE,
KubernetesConfigOptions.ServiceExposedType.NodePort);
final List<HasMetadata> resources =
this.externalServiceDecorator.buildAccompanyingKubernetesResources();
assertThat(((Service) resources.get(0)).getSpec().getType())
.isEqualTo(KubernetesConfigOptions.ServiceExposedType.NodePort.name());
this.flinkConfig.set(
KubernetesConfigOptions.REST_SERVICE_EXPOSED_TYPE,
KubernetesConfigOptions.ServiceExposedType.ClusterIP);
final List<HasMetadata> servicesWithClusterIP =
this.externalServiceDecorator.buildAccompanyingKubernetesResources();
assertThat(((Service) servicesWithClusterIP.get(0)).getSpec().getType())
.isEqualTo(KubernetesConfigOptions.ServiceExposedType.ClusterIP.name());
}
@Test
void testSetServiceExposedTypeWithHeadless() throws IOException {
this.flinkConfig.set(
KubernetesConfigOptions.REST_SERVICE_EXPOSED_TYPE,
KubernetesConfigOptions.ServiceExposedType.Headless_ClusterIP);
final List<HasMetadata> servicesWithHeadlessClusterIP =
this.externalServiceDecorator.buildAccompanyingKubernetesResources();
assertThat(((Service) servicesWithHeadlessClusterIP.get(0)).getSpec().getType())
.isEqualTo(KubernetesConfigOptions.ServiceExposedType.ClusterIP.name());
assertThat(((Service) servicesWithHeadlessClusterIP.get(0)).getSpec().getClusterIP())
.isEqualTo(HeadlessClusterIPService.HEADLESS_CLUSTER_IP);
}
}
| ExternalServiceDecoratorTest |
java | quarkusio__quarkus | extensions/security-jpa-common/runtime/src/main/java/io/quarkus/security/jpa/common/runtime/JpaIdentityProviderUtil.java | {
"start": 924,
"end": 1016
} | class ____ {
private JpaIdentityProviderUtil() {
// utility | JpaIdentityProviderUtil |
java | apache__rocketmq | remoting/src/test/java/org/apache/rocketmq/remoting/SubRemotingServerTest.java | {
"start": 1588,
"end": 4901
} | class ____ {
private static final int SUB_SERVER_PORT = 1234;
private static RemotingServer remotingServer;
private static RemotingClient remotingClient;
private static RemotingServer subServer;
@BeforeClass
public static void setup() throws InterruptedException {
remotingServer = RemotingServerTest.createRemotingServer();
remotingClient = RemotingServerTest.createRemotingClient();
subServer = createSubRemotingServer(remotingServer);
}
@AfterClass
public static void destroy() {
remotingClient.shutdown();
remotingServer.shutdown();
}
public static RemotingServer createSubRemotingServer(RemotingServer parentServer) {
RemotingServer subServer = parentServer.newRemotingServer(SUB_SERVER_PORT);
subServer.registerProcessor(1, new NettyRequestProcessor() {
@Override
public RemotingCommand processRequest(final ChannelHandlerContext ctx,
final RemotingCommand request) throws Exception {
request.setRemark(String.valueOf(RemotingHelper.parseSocketAddressPort(ctx.channel().localAddress())));
return request;
}
@Override
public boolean rejectRequest() {
return false;
}
}, null);
subServer.start();
return subServer;
}
@Test
public void testInvokeSubRemotingServer() throws InterruptedException, RemotingTimeoutException,
RemotingConnectException, RemotingSendRequestException {
RequestHeader requestHeader = new RequestHeader();
requestHeader.setCount(1);
requestHeader.setMessageTitle("Welcome");
// Parent remoting server doesn't support RequestCode 1
RemotingCommand request = RemotingCommand.createRequestCommand(1, requestHeader);
RemotingCommand response = remotingClient.invokeSync("localhost:" + remotingServer.localListenPort(), request,
1000 * 3);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(RemotingSysResponseCode.REQUEST_CODE_NOT_SUPPORTED);
// Issue request to SubRemotingServer
response = remotingClient.invokeSync("localhost:1234", request, 1000 * 3);
assertThat(response).isNotNull();
assertThat(response.getExtFields()).hasSize(2);
assertThat(response.getRemark()).isEqualTo(String.valueOf(SUB_SERVER_PORT));
// Issue unsupported request to SubRemotingServer
request.setCode(0);
response = remotingClient.invokeSync("localhost:1234", request, 1000 * 3);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(RemotingSysResponseCode.REQUEST_CODE_NOT_SUPPORTED);
// Issue request to a closed SubRemotingServer
request.setCode(1);
remotingServer.removeRemotingServer(SUB_SERVER_PORT);
subServer.shutdown();
try {
remotingClient.invokeSync("localhost:1234", request, 1000 * 3);
failBecauseExceptionWasNotThrown(RemotingTimeoutException.class);
} catch (Exception e) {
assertThat(e).isInstanceOfAny(RemotingTimeoutException.class, RemotingSendRequestException.class);
}
}
}
| SubRemotingServerTest |
java | apache__camel | components/camel-ai/camel-chatscript/src/main/java/org/apache/camel/component/chatscript/ChatScriptEndpoint.java | {
"start": 1707,
"end": 4760
} | class ____ extends DefaultEndpoint {
private ChatScriptBot bot;
@UriPath(description = "Hostname or IP of the server on which CS server is running")
@Metadata(required = true)
private String host;
@UriPath(description = "Port on which ChatScript is listening to", defaultValue = "" + DEFAULT_PORT)
private int port;
@UriPath(description = "Name of the Bot in CS to converse with")
@Metadata(required = true)
private String botName;
@UriParam(description = "Username who initializes the CS conversation. To be set when chat is initialized from camel route")
private String chatUserName;
@UriParam(description = "Issues :reset command to start a new conversation everytime", defaultValue = "false")
private boolean resetChat;
public ChatScriptEndpoint() {
}
public ChatScriptEndpoint(String uri, String remaining,
ChatScriptComponent component) throws URISyntaxException {
super(uri, component);
URI remainingUri = new URI("tcp://" + remaining);
port = remainingUri.getPort() == -1 ? DEFAULT_PORT : remainingUri.getPort();
if (ObjectHelper.isEmpty(remainingUri.getPath())) {
throw new IllegalArgumentException(ChatScriptConstants.URI_ERROR);
}
host = remainingUri.getHost();
if (ObjectHelper.isEmpty(host)) {
throw new IllegalArgumentException(ChatScriptConstants.URI_ERROR);
}
botName = remainingUri.getPath();
if (ObjectHelper.isEmpty(botName)) {
throw new IllegalArgumentException(ChatScriptConstants.URI_ERROR);
}
botName = botName.substring(1);
setBot(new ChatScriptBot(getHost(), getPort(), getBotName(), ""));
}
public boolean isResetChat() {
return resetChat;
}
public void setResetChat(boolean resetChat) {
this.resetChat = resetChat;
}
public String getChatUserName() {
return chatUserName;
}
public void setChatUserName(String chatusername) {
this.chatUserName = chatusername;
}
@Override
public Producer createProducer() throws Exception {
return new ChatScriptProducer(this);
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
throw new UnsupportedOperationException("Chatscript consumer not supported");
}
public String getHost() {
return host;
}
public void setHost(String hostName) {
this.host = hostName;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public String getBotName() {
return botName;
}
public void setBotName(String botname) {
this.botName = botname;
}
public static int getDefaultPort() {
return DEFAULT_PORT;
}
public ChatScriptBot getBot() {
return bot;
}
public void setBot(ChatScriptBot thisBot) {
this.bot = thisBot;
}
}
| ChatScriptEndpoint |
java | grpc__grpc-java | api/src/main/java/io/grpc/LoadBalancer.java | {
"start": 20064,
"end": 30667
} | class ____ {
private static final PickResult NO_RESULT = new PickResult(null, null, Status.OK, false);
@Nullable private final Subchannel subchannel;
@Nullable private final ClientStreamTracer.Factory streamTracerFactory;
// An error to be propagated to the application if subchannel == null
// Or OK if there is no error.
// subchannel being null and error being OK means RPC needs to wait
private final Status status;
// True if the result is created by withDrop()
private final boolean drop;
@Nullable private final String authorityOverride;
private PickResult(
@Nullable Subchannel subchannel, @Nullable ClientStreamTracer.Factory streamTracerFactory,
Status status, boolean drop) {
this.subchannel = subchannel;
this.streamTracerFactory = streamTracerFactory;
this.status = checkNotNull(status, "status");
this.drop = drop;
this.authorityOverride = null;
}
private PickResult(
@Nullable Subchannel subchannel, @Nullable ClientStreamTracer.Factory streamTracerFactory,
Status status, boolean drop, @Nullable String authorityOverride) {
this.subchannel = subchannel;
this.streamTracerFactory = streamTracerFactory;
this.status = checkNotNull(status, "status");
this.drop = drop;
this.authorityOverride = authorityOverride;
}
/**
* A decision to proceed the RPC on a Subchannel.
*
* <p>The Subchannel should either be an original Subchannel returned by {@link
* Helper#createSubchannel Helper.createSubchannel()}, or a wrapper of it preferably based on
* {@code ForwardingSubchannel}. At the very least its {@link Subchannel#getInternalSubchannel
* getInternalSubchannel()} must return the same object as the one returned by the original.
* Otherwise the Channel cannot use it for the RPC.
*
* <p>When the RPC tries to use the return Subchannel, which is briefly after this method
* returns, the state of the Subchannel will decide where the RPC would go:
*
* <ul>
* <li>READY: the RPC will proceed on this Subchannel.</li>
* <li>IDLE: the RPC will be buffered. Subchannel will attempt to create connection.</li>
* <li>All other states: the RPC will be buffered.</li>
* </ul>
*
* <p><strong>All buffered RPCs will stay buffered</strong> until the next call of {@link
* Helper#updateBalancingState Helper.updateBalancingState()}, which will trigger a new picking
* process.
*
* <p>Note that Subchannel's state may change at the same time the picker is making the
* decision, which means the decision may be made with (to-be) outdated information. For
* example, a picker may return a Subchannel known to be READY, but it has become IDLE when is
* about to be used by the RPC, which makes the RPC to be buffered. The LoadBalancer will soon
* learn about the Subchannels' transition from READY to IDLE, create a new picker and allow the
* RPC to use another READY transport if there is any.
*
* <p>You will want to avoid running into a situation where there are READY Subchannels out
* there but some RPCs are still buffered for longer than a brief time.
* <ul>
* <li>This can happen if you return Subchannels with states other than READY and IDLE. For
* example, suppose you round-robin on 2 Subchannels, in READY and CONNECTING states
* respectively. If the picker ignores the state and pick them equally, 50% of RPCs will
* be stuck in buffered state until both Subchannels are READY.</li>
* <li>This can also happen if you don't create a new picker at key state changes of
* Subchannels. Take the above round-robin example again. Suppose you do pick only READY
* and IDLE Subchannels, and initially both Subchannels are READY. Now one becomes IDLE,
* then CONNECTING and stays CONNECTING for a long time. If you don't create a new picker
* in response to the CONNECTING state to exclude that Subchannel, 50% of RPCs will hit it
* and be buffered even though the other Subchannel is READY.</li>
* </ul>
*
* <p>In order to prevent unnecessary delay of RPCs, the rules of thumb are:
* <ol>
* <li>The picker should only pick Subchannels that are known as READY or IDLE. Whether to
* pick IDLE Subchannels depends on whether you want Subchannels to connect on-demand or
* actively:
* <ul>
* <li>If you want connect-on-demand, include IDLE Subchannels in your pick results,
* because when an RPC tries to use an IDLE Subchannel, the Subchannel will try to
* connect.</li>
* <li>If you want Subchannels to be always connected even when there is no RPC, you
* would call {@link Subchannel#requestConnection Subchannel.requestConnection()}
* whenever the Subchannel has transitioned to IDLE, then you don't need to include
* IDLE Subchannels in your pick results.</li>
* </ul></li>
* <li>Always create a new picker and call {@link Helper#updateBalancingState
* Helper.updateBalancingState()} whenever {@link #handleSubchannelState
* handleSubchannelState()} is called, unless the new state is SHUTDOWN. See
* {@code handleSubchannelState}'s javadoc for more details.</li>
* </ol>
*
* @param subchannel the picked Subchannel. It must have been {@link Subchannel#start started}
* @param streamTracerFactory if not null, will be used to trace the activities of the stream
* created as a result of this pick. Note it's possible that no
* stream is created at all in some cases.
* @since 1.3.0
*/
public static PickResult withSubchannel(
Subchannel subchannel, @Nullable ClientStreamTracer.Factory streamTracerFactory) {
return new PickResult(
checkNotNull(subchannel, "subchannel"), streamTracerFactory, Status.OK,
false);
}
/**
* Same as {@code withSubchannel(subchannel, streamTracerFactory)} but with an authority name
* to override in the host header.
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/11656")
public static PickResult withSubchannel(
Subchannel subchannel, @Nullable ClientStreamTracer.Factory streamTracerFactory,
@Nullable String authorityOverride) {
return new PickResult(
checkNotNull(subchannel, "subchannel"), streamTracerFactory, Status.OK,
false, authorityOverride);
}
/**
* Equivalent to {@code withSubchannel(subchannel, null)}.
*
* @since 1.2.0
*/
public static PickResult withSubchannel(Subchannel subchannel) {
return withSubchannel(subchannel, null);
}
/**
* A decision to report a connectivity error to the RPC. If the RPC is {@link
* CallOptions#withWaitForReady wait-for-ready}, it will stay buffered. Otherwise, it will fail
* with the given error.
*
* @param error the error status. Must not be OK.
* @since 1.2.0
*/
public static PickResult withError(Status error) {
Preconditions.checkArgument(!error.isOk(), "error status shouldn't be OK");
return new PickResult(null, null, error, false);
}
/**
* A decision to fail an RPC immediately. This is a final decision and will ignore retry
* policy.
*
* @param status the status with which the RPC will fail. Must not be OK.
* @since 1.8.0
*/
public static PickResult withDrop(Status status) {
Preconditions.checkArgument(!status.isOk(), "drop status shouldn't be OK");
return new PickResult(null, null, status, true);
}
/**
* No decision could be made. The RPC will stay buffered.
*
* @since 1.2.0
*/
public static PickResult withNoResult() {
return NO_RESULT;
}
/** Returns the authority override if any. */
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/11656")
@Nullable
public String getAuthorityOverride() {
return authorityOverride;
}
/**
* The Subchannel if this result was created by {@link #withSubchannel withSubchannel()}, or
* null otherwise.
*
* @since 1.2.0
*/
@Nullable
public Subchannel getSubchannel() {
return subchannel;
}
/**
* The stream tracer factory this result was created with.
*
* @since 1.3.0
*/
@Nullable
public ClientStreamTracer.Factory getStreamTracerFactory() {
return streamTracerFactory;
}
/**
* The status associated with this result. Non-{@code OK} if created with {@link #withError
* withError}, or {@code OK} otherwise.
*
* @since 1.2.0
*/
public Status getStatus() {
return status;
}
/**
* Returns {@code true} if this result was created by {@link #withDrop withDrop()}.
*
* @since 1.8.0
*/
public boolean isDrop() {
return drop;
}
/**
* Returns {@code true} if the pick was not created with {@link #withNoResult()}.
*/
public boolean hasResult() {
return !(subchannel == null && status.isOk());
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("subchannel", subchannel)
.add("streamTracerFactory", streamTracerFactory)
.add("status", status)
.add("drop", drop)
.add("authority-override", authorityOverride)
.toString();
}
@Override
public int hashCode() {
return Objects.hashCode(subchannel, status, streamTracerFactory, drop);
}
/**
* Returns true if the {@link Subchannel}, {@link Status}, and
* {@link ClientStreamTracer.Factory} all match.
*/
@Override
public boolean equals(Object other) {
if (!(other instanceof PickResult)) {
return false;
}
PickResult that = (PickResult) other;
return Objects.equal(subchannel, that.subchannel) && Objects.equal(status, that.status)
&& Objects.equal(streamTracerFactory, that.streamTracerFactory)
&& drop == that.drop;
}
}
/**
* Arguments for creating a {@link Subchannel}.
*
* @since 1.22.0
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/1771")
public static final | PickResult |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/resource/bitmap/ImageReader.java | {
"start": 6089,
"end": 7528
} | class ____ implements ImageReader {
private final InputStreamRewinder dataRewinder;
private final ArrayPool byteArrayPool;
private final List<ImageHeaderParser> parsers;
InputStreamImageReader(
InputStream is, List<ImageHeaderParser> parsers, ArrayPool byteArrayPool) {
this.byteArrayPool = Preconditions.checkNotNull(byteArrayPool);
this.parsers = Preconditions.checkNotNull(parsers);
dataRewinder = new InputStreamRewinder(is, byteArrayPool);
}
@Nullable
@Override
public Bitmap decodeBitmap(BitmapFactory.Options options) throws IOException {
InputStream inputStream = dataRewinder.rewindAndGet();
return GlideBitmapFactory.decodeStream(inputStream, options, this);
}
@Override
public ImageHeaderParser.ImageType getImageType() throws IOException {
return ImageHeaderParserUtils.getType(parsers, dataRewinder.rewindAndGet(), byteArrayPool);
}
@Override
public int getImageOrientation() throws IOException {
return ImageHeaderParserUtils.getOrientation(
parsers, dataRewinder.rewindAndGet(), byteArrayPool);
}
@Override
public boolean hasJpegMpf() throws IOException {
return ImageHeaderParserUtils.hasJpegMpf(parsers, dataRewinder.rewindAndGet(), byteArrayPool);
}
@Override
public void stopGrowingBuffers() {
dataRewinder.fixMarkLimits();
}
}
final | InputStreamImageReader |
java | quarkusio__quarkus | extensions/reactive-routes/deployment/src/test/java/io/quarkus/vertx/web/context/DuplicatedContextTest.java | {
"start": 1085,
"end": 2429
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyRoutes.class));
@Test
public void testThatRoutesAreCalledOnDuplicatedContext() {
// Creates a bunch of requests that will be executed concurrently.
// So, we are sure that event loops are reused.
List<Uni<Void>> unis = new ArrayList<>();
for (int i = 0; i < 500; i++) {
String uuid = UUID.randomUUID().toString();
unis.add(Uni.createFrom().item(() -> {
String resp = get("/context/" + uuid).asString();
Assertions.assertEquals(resp, "OK-" + uuid);
return null;
}));
}
Uni.join().all(unis).andFailFast()
.runSubscriptionOn(Infrastructure.getDefaultExecutor())
.await().atMost(Duration.ofSeconds(10));
}
@Test
@Disabled("This test is flaky on CI, must be investigated")
public void testThatBlockingRoutesAreCalledOnDuplicatedContext() {
String uuid = UUID.randomUUID().toString();
String resp = get("/context-blocking/" + uuid).asString();
Assertions.assertEquals(resp, "OK-" + uuid);
}
@ApplicationScoped
public static | DuplicatedContextTest |
java | quarkusio__quarkus | extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/HibernateOrmRuntimeConfigPersistenceUnit.java | {
"start": 3003,
"end": 4757
} | interface ____ {
/**
* Schema generation configuration.
*/
@Deprecated(forRemoval = true, since = "3.22")
HibernateOrmConfigPersistenceUnitDatabaseGeneration generation();
/**
* The default catalog to use for the database objects.
*/
Optional<@WithConverter(TrimmedStringConverter.class) String> defaultCatalog();
/**
* The default schema to use for the database objects.
*/
Optional<@WithConverter(TrimmedStringConverter.class) String> defaultSchema();
/**
* Whether Hibernate ORM should check on startup
* that the version of the database matches the version configured on the dialect
* (either the default version, or the one set through `quarkus.datasource.db-version`).
*
* This should be set to `false` if the database is not available on startup.
*
* @asciidoclet
*/
@WithName("version-check.enabled")
@ConfigDocDefault("`false` if starting offline (see `start-offline`), `true` otherwise")
Optional<Boolean> versionCheckEnabled();
/**
* Instructs Hibernate ORM to avoid connecting to the database on startup.
*
* When starting offline:
* * Hibernate ORM will not attempt to create a schema automatically, so it must already be created when the application
* hits the database for the first time.
* * Quarkus will not check that the database version matches the one configured at build time.
*
* @asciidoclet
*/
@WithDefault("false")
boolean startOffline();
}
@ConfigGroup
| HibernateOrmConfigPersistenceUnitDatabase |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/abstract_/AbstractAssert_is_Test.java | {
"start": 1061,
"end": 1510
} | class ____ extends AbstractAssertBaseTest {
private static Condition<Object> condition;
@BeforeAll
static void setUpOnce() {
condition = new TestCondition<>();
}
@Override
protected ConcreteAssert invoke_api_method() {
return assertions.is(condition);
}
@Override
protected void verify_internal_effects() {
verify(conditions).assertIs(getInfo(assertions), getActual(assertions), condition);
}
}
| AbstractAssert_is_Test |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/protocol/Readable.java | {
"start": 1125,
"end": 2868
} | interface ____ {
byte readByte();
short readShort();
int readInt();
long readLong();
double readDouble();
byte[] readArray(int length);
int readUnsignedVarint();
ByteBuffer readByteBuffer(int length);
int readVarint();
long readVarlong();
int remaining();
/**
* Returns a new Readable object whose content will be shared with this object.
* <br>
* The content of the new Readable object will start at this Readable's current
* position. The two Readable position will be independent, so read from one will
* not impact the other.
*/
Readable slice();
default String readString(int length) {
byte[] arr = readArray(length);
return new String(arr, StandardCharsets.UTF_8);
}
default List<RawTaggedField> readUnknownTaggedField(List<RawTaggedField> unknowns, int tag, int size) {
if (unknowns == null) {
unknowns = new ArrayList<>();
}
byte[] data = readArray(size);
unknowns.add(new RawTaggedField(tag, data));
return unknowns;
}
default MemoryRecords readRecords(int length) {
if (length < 0) {
// no records
return null;
} else {
ByteBuffer recordsBuffer = readByteBuffer(length);
return MemoryRecords.readableRecords(recordsBuffer);
}
}
/**
* Read a UUID with the most significant digits first.
*/
default Uuid readUuid() {
return new Uuid(readLong(), readLong());
}
default int readUnsignedShort() {
return Short.toUnsignedInt(readShort());
}
default long readUnsignedInt() {
return Integer.toUnsignedLong(readInt());
}
}
| Readable |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client-jaxrs/runtime/src/main/java/io/quarkus/jaxrs/client/reactive/runtime/JaxrsClientReactiveRecorder.java | {
"start": 828,
"end": 3865
} | class ____ extends ResteasyReactiveCommonRecorder {
private static volatile Serialisers serialisers;
private static volatile GenericTypeMapping genericTypeMapping;
private static volatile Map<Class<?>, MultipartResponseData> multipartResponsesData;
private static volatile ClientProxies clientProxies = new ClientProxies(Collections.emptyMap(), Collections.emptyMap());
public static ClientProxies getClientProxies() {
return clientProxies;
}
public static Serialisers getSerialisers() {
return serialisers;
}
public static GenericTypeMapping getGenericTypeMapping() {
return genericTypeMapping;
}
public static Map<Class<?>, MultipartResponseData> getMultipartResponsesData() {
return multipartResponsesData;
}
public void setMultipartResponsesData(Map<String, RuntimeValue<MultipartResponseData>> multipartResponsesData) {
Map<Class<?>, MultipartResponseData> runtimeMap = new HashMap<>();
for (Map.Entry<String, RuntimeValue<MultipartResponseData>> multipartData : multipartResponsesData.entrySet()) {
runtimeMap.put(loadClass(multipartData.getKey()), multipartData.getValue().getValue());
}
JaxrsClientReactiveRecorder.multipartResponsesData = runtimeMap;
}
public void setupClientProxies(
Map<String, RuntimeValue<BiFunction<WebTarget, List<ParamConverterProvider>, ?>>> clientImplementations,
Map<String, String> failures) {
clientProxies = createClientImpls(clientImplementations, failures);
}
public Serialisers createSerializers() {
ClientSerialisers s = new ClientSerialisers();
s.registerBuiltins(RuntimeType.CLIENT);
serialisers = s;
return s;
}
private ClientProxies createClientImpls(
Map<String, RuntimeValue<BiFunction<WebTarget, List<ParamConverterProvider>, ?>>> clientImplementations,
Map<String, String> failureMessages) {
Map<Class<?>, BiFunction<WebTarget, List<ParamConverterProvider>, ?>> map = new HashMap<>();
for (Map.Entry<String, RuntimeValue<BiFunction<WebTarget, List<ParamConverterProvider>, ?>>> entry : clientImplementations
.entrySet()) {
map.put(loadClass(entry.getKey()), entry.getValue().getValue());
}
Map<Class<?>, String> failures = new HashMap<>();
for (Map.Entry<String, String> entry : failureMessages.entrySet()) {
failures.put(loadClass(entry.getKey()), entry.getValue());
}
return new ClientProxies(map, failures);
}
public void setGenericTypeMapping(GenericTypeMapping typeMapping) {
genericTypeMapping = typeMapping;
}
public void registerInvocationHandlerGenericType(GenericTypeMapping genericTypeMapping, String invocationHandlerClass,
String resolvedType) {
genericTypeMapping.addInvocationCallback(loadClass(invocationHandlerClass), loadClass(resolvedType));
}
}
| JaxrsClientReactiveRecorder |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/annotation/ResourceElementResolver.java | {
"start": 8605,
"end": 10425
} | class ____ extends ResourceElementResolver {
private final String methodName;
private final Class<?> lookupType;
private ResourceMethodResolver(String name, boolean defaultName, String methodName, Class<?> lookupType) {
super(name, defaultName);
this.methodName = methodName;
this.lookupType = lookupType;
}
@Override
public void resolveAndSet(RegisteredBean registeredBean, Object instance) {
Assert.notNull(registeredBean, "'registeredBean' must not be null");
Assert.notNull(instance, "'instance' must not be null");
Method method = getMethod(registeredBean);
Object resolved = resolve(registeredBean);
ReflectionUtils.makeAccessible(method);
ReflectionUtils.invokeMethod(method, instance, resolved);
}
@Override
protected DependencyDescriptor createDependencyDescriptor(RegisteredBean registeredBean) {
return new LookupDependencyDescriptor(
getMethod(registeredBean), this.lookupType, isLazyLookup(registeredBean));
}
@Override
protected Class<?> getLookupType(RegisteredBean bean) {
return this.lookupType;
}
@Override
protected AnnotatedElement getAnnotatedElement(RegisteredBean registeredBean) {
return getMethod(registeredBean);
}
private Method getMethod(RegisteredBean registeredBean) {
Method method = ReflectionUtils.findMethod(registeredBean.getBeanClass(), this.methodName, this.lookupType);
Assert.notNull(method,
() -> "Method '%s' with parameter type '%s' declared on %s could not be found.".formatted(
this.methodName, this.lookupType.getName(), registeredBean.getBeanClass().getName()));
return method;
}
}
/**
* Extension of the DependencyDescriptor class,
* overriding the dependency type with the specified resource type.
*/
@SuppressWarnings("serial")
static | ResourceMethodResolver |
java | apache__maven | compat/maven-compat/src/main/java/org/apache/maven/toolchain/RequirementMatcher.java | {
"start": 892,
"end": 967
} | interface ____ {
boolean matches(String requirement);
}
| RequirementMatcher |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMappingWithDependency.java | {
"start": 993,
"end": 1162
} | interface ____ must be implemented to allow pluggable
* DNS-name/IP-address to RackID resolvers.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public | that |
java | quarkusio__quarkus | independent-projects/qute/debug/src/main/java/io/quarkus/qute/debug/agent/source/RemoteSource.java | {
"start": 727,
"end": 3245
} | class ____ extends Source {
/**
* The URI of the template, if available.
* <p>
* May be {@code null} for in-memory or non-file-based templates.
* </p>
*/
private final transient URI uri;
/**
* The Qute template identifier, used internally by the Qute engine.
*/
private final transient String templateId;
/**
* Creates a new remote source associated with the given template.
*
* @param uri the URI of the template source, or {@code null} if not applicable
* @param templateId the template ID used by the Qute engine (never {@code null})
*/
public RemoteSource(URI uri, String templateId) {
this.uri = uri;
this.templateId = templateId;
// Initialize the DAP "name" field for display purposes in the client.
// If the URI is known, extract the filename; otherwise, use the templateId.
super.setName(uri != null ? computeName(uri) : templateId);
}
/**
* Computes a human-readable name for this source, based on its URI.
* <p>
* The default implementation extracts the last segment (file name) from the URI.
* Subclasses may override this method to provide a more descriptive name.
* </p>
*
* @param uri the source URI
* @return a display name for the source
*/
protected String computeName(URI uri) {
return getFileNameFallback(uri.toString());
}
/**
* Returns the URI of this template source.
*
* @return the URI, or {@code null} if not applicable
*/
public URI getUri() {
return uri;
}
/**
* Returns the Qute template identifier.
*
* @return the template ID (never {@code null})
*/
public String getTemplateId() {
return templateId;
}
/**
* Utility method that extracts the file name from a URI or path string.
* <p>
* This method serves as a fallback when the standard {@link URI#getPath()} is
* unavailable or when the input is not a valid URI.
* </p>
*
* @param s a URI or path string
* @return the last segment (file name), or the original string if no separator is found
*/
private static String getFileNameFallback(String s) {
int idx = Math.max(s.lastIndexOf('/'), s.lastIndexOf('\\'));
if (idx != -1 && idx < s.length() - 1) {
return s.substring(idx + 1);
}
return s; // Return the full string if no separator found
}
}
| RemoteSource |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/AWSConfigEndpointBuilderFactory.java | {
"start": 1556,
"end": 15933
} | interface ____
extends
EndpointProducerBuilder {
default AdvancedAWSConfigEndpointBuilder advanced() {
return (AdvancedAWSConfigEndpointBuilder) this;
}
/**
* The operation to perform.
*
* The option is a:
* <code>org.apache.camel.component.aws.config.AWSConfigOperations</code> type.
*
* Required: true
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder operation(org.apache.camel.component.aws.config.AWSConfigOperations operation) {
doSetProperty("operation", operation);
return this;
}
/**
* The operation to perform.
*
* The option will be converted to a
* <code>org.apache.camel.component.aws.config.AWSConfigOperations</code> type.
*
* Required: true
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder operation(String operation) {
doSetProperty("operation", operation);
return this;
}
/**
* Set the need for overriding the endpoint. This option needs to be
* used in combination with the uriEndpointOverride option.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param overrideEndpoint the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder overrideEndpoint(boolean overrideEndpoint) {
doSetProperty("overrideEndpoint", overrideEndpoint);
return this;
}
/**
* Set the need for overriding the endpoint. This option needs to be
* used in combination with the uriEndpointOverride option.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param overrideEndpoint the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder overrideEndpoint(String overrideEndpoint) {
doSetProperty("overrideEndpoint", overrideEndpoint);
return this;
}
/**
* If we want to use a POJO request as body or not.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param pojoRequest the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder pojoRequest(boolean pojoRequest) {
doSetProperty("pojoRequest", pojoRequest);
return this;
}
/**
* If we want to use a POJO request as body or not.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param pojoRequest the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder pojoRequest(String pojoRequest) {
doSetProperty("pojoRequest", pojoRequest);
return this;
}
/**
* The region in which the Config client needs to work. When using this
* parameter, the configuration will expect the lowercase name of the
* region (for example, ap-east-1) You'll need to use the name
* Region.EU_WEST_1.id().
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param region the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder region(String region) {
doSetProperty("region", region);
return this;
}
/**
* Set the overriding uri endpoint. This option needs to be used in
* combination with overrideEndpoint option.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param uriEndpointOverride the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder uriEndpointOverride(String uriEndpointOverride) {
doSetProperty("uriEndpointOverride", uriEndpointOverride);
return this;
}
/**
* To define a proxy host when instantiating the Config client.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyHost the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder proxyHost(String proxyHost) {
doSetProperty("proxyHost", proxyHost);
return this;
}
/**
* To define a proxy port when instantiating the Config client.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: proxy
*
* @param proxyPort the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder proxyPort(Integer proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy port when instantiating the Config client.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: proxy
*
* @param proxyPort the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder proxyPort(String proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy protocol when instantiating the Config client.
*
* The option is a: <code>software.amazon.awssdk.core.Protocol</code>
* type.
*
* Default: HTTPS
* Group: proxy
*
* @param proxyProtocol the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder proxyProtocol(software.amazon.awssdk.core.Protocol proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* To define a proxy protocol when instantiating the Config client.
*
* The option will be converted to a
* <code>software.amazon.awssdk.core.Protocol</code> type.
*
* Default: HTTPS
* Group: proxy
*
* @param proxyProtocol the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder proxyProtocol(String proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* Amazon AWS Access Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param accessKey the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder accessKey(String accessKey) {
doSetProperty("accessKey", accessKey);
return this;
}
/**
* If using a profile credentials provider, this parameter will set the
* profile name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param profileCredentialsName the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder profileCredentialsName(String profileCredentialsName) {
doSetProperty("profileCredentialsName", profileCredentialsName);
return this;
}
/**
* Amazon AWS Secret Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param secretKey the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder secretKey(String secretKey) {
doSetProperty("secretKey", secretKey);
return this;
}
/**
* Amazon AWS Session Token used when the user needs to assume an IAM
* role.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param sessionToken the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder sessionToken(String sessionToken) {
doSetProperty("sessionToken", sessionToken);
return this;
}
/**
* If we want to trust all certificates in case of overriding the
* endpoint.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param trustAllCertificates the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder trustAllCertificates(boolean trustAllCertificates) {
doSetProperty("trustAllCertificates", trustAllCertificates);
return this;
}
/**
* If we want to trust all certificates in case of overriding the
* endpoint.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param trustAllCertificates the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder trustAllCertificates(String trustAllCertificates) {
doSetProperty("trustAllCertificates", trustAllCertificates);
return this;
}
/**
* Set whether the Config client should expect to load credentials
* through a default credentials provider or to expect static
* credentials to be passed in.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useDefaultCredentialsProvider the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder useDefaultCredentialsProvider(boolean useDefaultCredentialsProvider) {
doSetProperty("useDefaultCredentialsProvider", useDefaultCredentialsProvider);
return this;
}
/**
* Set whether the Config client should expect to load credentials
* through a default credentials provider or to expect static
* credentials to be passed in.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useDefaultCredentialsProvider the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder useDefaultCredentialsProvider(String useDefaultCredentialsProvider) {
doSetProperty("useDefaultCredentialsProvider", useDefaultCredentialsProvider);
return this;
}
/**
* Set whether the Config client should expect to load credentials
* through a profile credentials provider.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useProfileCredentialsProvider the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder useProfileCredentialsProvider(boolean useProfileCredentialsProvider) {
doSetProperty("useProfileCredentialsProvider", useProfileCredentialsProvider);
return this;
}
/**
* Set whether the Config client should expect to load credentials
* through a profile credentials provider.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useProfileCredentialsProvider the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder useProfileCredentialsProvider(String useProfileCredentialsProvider) {
doSetProperty("useProfileCredentialsProvider", useProfileCredentialsProvider);
return this;
}
/**
* Set whether the Config client should expect to use Session
* Credentials. This is useful in a situation in which the user needs to
* assume an IAM role for doing operations in Config.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useSessionCredentials the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder useSessionCredentials(boolean useSessionCredentials) {
doSetProperty("useSessionCredentials", useSessionCredentials);
return this;
}
/**
* Set whether the Config client should expect to use Session
* Credentials. This is useful in a situation in which the user needs to
* assume an IAM role for doing operations in Config.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useSessionCredentials the value to set
* @return the dsl builder
*/
default AWSConfigEndpointBuilder useSessionCredentials(String useSessionCredentials) {
doSetProperty("useSessionCredentials", useSessionCredentials);
return this;
}
}
/**
* Advanced builder for endpoint for the AWS Config Service component.
*/
public | AWSConfigEndpointBuilder |
java | apache__camel | test-infra/camel-test-infra-nats/src/main/java/org/apache/camel/test/infra/nats/services/NatsLocalContainerInfraService.java | {
"start": 2458,
"end": 3935
} | class ____ extends GenericContainer<NatsContainer> {
public NatsContainer(boolean fixedPort) {
super(imageName);
withNetworkAliases(containerName)
.waitingFor(Wait.forLogMessage(".*Listening.*for.*route.*connections.*", 1));
if (fixedPort) {
addFixedExposedPort(PORT, PORT);
} else {
withExposedPorts(PORT);
}
}
}
return new NatsContainer(ContainerEnvironmentUtil.isFixedPort(this.getClass()));
}
@Override
public void registerProperties() {
System.setProperty(NatsProperties.SERVICE_ADDRESS, getServiceAddress());
}
@Override
public void initialize() {
LOG.info("Trying to start the Nats container");
container.start();
registerProperties();
LOG.info("Nats instance running at {}", getServiceAddress());
}
@Override
public void shutdown() {
LOG.info("Stopping the Nats container");
container.stop();
}
@Override
public GenericContainer<?> getContainer() {
return container;
}
protected String getHost() {
return container.getHost();
}
protected int getPort() {
return container.getMappedPort(PORT);
}
@Override
public String getServiceAddress() {
return String.format("%s:%d", getHost(), getPort());
}
}
| NatsContainer |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/features/ListFeature.java | {
"start": 1153,
"end": 1964
} | enum ____ implements Feature<List> {
SUPPORTS_SET,
SUPPORTS_ADD_WITH_INDEX(CollectionFeature.SUPPORTS_ADD),
SUPPORTS_REMOVE_WITH_INDEX(CollectionFeature.SUPPORTS_REMOVE),
GENERAL_PURPOSE(
CollectionFeature.GENERAL_PURPOSE,
SUPPORTS_SET,
SUPPORTS_ADD_WITH_INDEX,
SUPPORTS_REMOVE_WITH_INDEX),
/** Features supported by lists where only removal is allowed. */
REMOVE_OPERATIONS(CollectionFeature.REMOVE_OPERATIONS, SUPPORTS_REMOVE_WITH_INDEX);
private final Set<Feature<? super List>> implied;
ListFeature(Feature<? super List>... implied) {
this.implied = copyToSet(implied);
}
@Override
public Set<Feature<? super List>> getImpliedFeatures() {
return implied;
}
@Retention(RetentionPolicy.RUNTIME)
@Inherited
@TesterAnnotation
public @ | ListFeature |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/ObjectToString.java | {
"start": 1717,
"end": 3749
} | class ____ extends AbstractToString {
private static boolean finalNoOverrides(Type type, VisitorState state) {
if (type == null) {
return false;
}
// We don't flag use of toString() on non-final objects because sub classes might have a
// meaningful toString() override.
if (!type.isFinal()) {
return false;
}
Types types = state.getTypes();
Names names = state.getNames();
// find Object.toString
MethodSymbol toString =
(MethodSymbol) state.getSymtab().objectType.tsym.members().findFirst(names.toString);
// We explore the superclasses of the receiver type as well as the interfaces it
// implements and we collect all overrides of java.lang.Object.toString(). If one of those
// overrides is present, then we don't flag it.
return Iterables.isEmpty(
types
.membersClosure(type, /* skipInterface= */ false)
.getSymbolsByName(
names.toString,
m ->
m != toString
&& m.overrides(toString, type.tsym, types, /* checkResult= */ false)));
}
@Inject
ObjectToString(ErrorProneFlags flags) {
super(flags);
}
@Override
protected TypePredicate typePredicate() {
return ObjectToString::finalNoOverrides;
}
@Override
protected Optional<String> descriptionMessageForDefaultMatch(Type type, VisitorState state) {
return Optional.of(
String.format(
"%1$s is final and does not override Object.toString, so converting it to a string"
+ " will print its identity (e.g. `%2$s@4488aabb`) instead of useful information.",
SuggestedFixes.prettyType(type, state), type.tsym.getSimpleName()));
}
@Override
protected Optional<Fix> implicitToStringFix(ExpressionTree tree, VisitorState state) {
return Optional.empty();
}
@Override
protected Optional<Fix> toStringFix(Tree parent, ExpressionTree tree, VisitorState state) {
return Optional.empty();
}
}
| ObjectToString |
java | quarkusio__quarkus | core/runtime/src/main/java/io/quarkus/runtime/logging/LogFilterFactory.java | {
"start": 426,
"end": 1401
} | interface ____ {
int MIN_PRIORITY = Integer.MAX_VALUE;
int DEFAULT_PRIORITY = 0;
Filter create(String className) throws Exception;
default int priority() {
return DEFAULT_PRIORITY;
}
static LogFilterFactory load() {
LogFilterFactory result = null;
ServiceLoader<LogFilterFactory> load = ServiceLoader.load(LogFilterFactory.class);
for (LogFilterFactory next : load) {
if (result == null) {
result = next;
} else {
if (next.priority() < result.priority()) {
result = next;
}
}
}
if (result == null) {
result = new ReflectionLogFilterFactory();
}
return result;
}
/**
* The default implementation used when no other implementation is found.
* This simply calls the class' no-arg constructor (and fails if one does not exist).
*/
| LogFilterFactory |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/AbstractByteBufTest.java | {
"start": 3123,
"end": 3202
} | class ____ channel buffers.
*
* <p>Copy from netty 4.1.32.Final.
*/
abstract | for |
java | apache__dubbo | dubbo-plugin/dubbo-rest-jaxrs/src/test/java/org/apache/dubbo/rpc/protocol/tri/rest/support/jaxrs/compatible/filter/TestContainerRequestFilter.java | {
"start": 1153,
"end": 1418
} | class ____ implements ContainerRequestFilter {
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
requestContext.abortWith(Response.status(200).entity("return-success").build());
}
}
| TestContainerRequestFilter |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/handler/predicate/QueryRoutePredicateFactoryTests.java | {
"start": 4229,
"end": 4526
} | class ____ {
@Value("${test.uri}")
private String uri;
@Bean
RouteLocator queryRouteLocator(RouteLocatorBuilder builder) {
return builder.routes()
.route("foo_query_param", r -> r.query("foo", "bar").filters(f -> f.prefixPath("/httpbin")).uri(uri))
.build();
}
}
}
| TestConfig |
java | google__guava | android/guava-tests/benchmark/com/google/common/io/ByteSourceAsCharSourceReadBenchmark.java | {
"start": 1270,
"end": 5371
} | enum ____ {
TO_BYTE_ARRAY_NEW_STRING {
@Override
String read(ByteSource byteSource, Charset cs) throws IOException {
return new String(byteSource.read(), cs);
}
},
USING_CHARSTREAMS_COPY {
@Override
String read(ByteSource byteSource, Charset cs) throws IOException {
StringBuilder sb = new StringBuilder();
try (InputStreamReader reader = new InputStreamReader(byteSource.openStream(), cs)) {
CharStreams.copy(reader, sb);
}
return sb.toString();
}
},
// It really seems like this should be faster than TO_BYTE_ARRAY_NEW_STRING. But it just isn't
// my best guess is that the jdk authors have spent more time optimizing that callpath than this
// one. (StringCoding$StringDecoder vs. StreamDecoder). StringCoding has a ton of special cases
// theoretically we could duplicate all that logic here to try to beat 'new String' or at least
// come close.
USING_DECODER_WITH_SIZE_HINT {
@Override
String read(ByteSource byteSource, Charset cs) throws IOException {
Optional<Long> size = byteSource.sizeIfKnown();
// if we know the size and it fits in an int
if (size.isPresent() && size.get().longValue() == size.get().intValue()) {
// otherwise try to presize a StringBuilder
// it is kind of lame that we need to construct a decoder to access this value.
// if this is a concern we could add special cases for some known charsets (like utf8)
// or we could avoid inputstreamreader and use the decoder api directly
// TODO(lukes): in a real implementation we would need to handle overflow conditions
int maxChars = (int) (size.get().intValue() * cs.newDecoder().maxCharsPerByte());
char[] buffer = new char[maxChars];
int bufIndex = 0;
int remaining = buffer.length;
try (InputStreamReader reader = new InputStreamReader(byteSource.openStream(), cs)) {
int nRead = 0;
while (remaining > 0 && (nRead = reader.read(buffer, bufIndex, remaining)) != -1) {
bufIndex += nRead;
remaining -= nRead;
}
if (nRead == -1) {
// we reached EOF
return new String(buffer, 0, bufIndex);
}
// otherwise we got the size wrong. This can happen if the size changes between when
// we called sizeIfKnown and when we started reading the file (or I guess if
// maxCharsPerByte is wrong)
// Fallback to an incremental approach
StringBuilder builder = new StringBuilder(bufIndex + 32);
builder.append(buffer, 0, bufIndex);
buffer = null; // release for gc
CharStreams.copy(reader, builder);
return builder.toString();
}
} else {
return TO_BYTE_ARRAY_NEW_STRING.read(byteSource, cs);
}
}
};
abstract String read(ByteSource byteSource, Charset cs) throws IOException;
}
@Param({"UTF-8"})
String charsetName;
@Param ReadStrategy strategy;
@Param({"10", "1024", "1048576"})
int size;
Charset charset;
ByteSource data;
@BeforeExperiment
public void setUp() {
charset = Charset.forName(charsetName);
StringBuilder sb = new StringBuilder();
Random random = new Random(0xdeadbeef); // for unpredictable but reproducible behavior
sb.ensureCapacity(size);
for (int k = 0; k < size; k++) {
// [9-127) includes all ascii non-control characters
sb.append((char) (random.nextInt(127 - 9) + 9));
}
String string = sb.toString();
sb.setLength(0);
data = ByteSource.wrap(string.getBytes(charset));
}
@Benchmark
public int timeCopy(int reps) throws IOException {
int r = 0;
Charset localCharset = charset;
ByteSource localData = data;
ReadStrategy localStrategy = strategy;
for (int i = 0; i < reps; i++) {
r += localStrategy.read(localData, localCharset).hashCode();
}
return r;
}
}
| ReadStrategy |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableThrottleLatestTest.java | {
"start": 1210,
"end": 19969
} | class ____ extends RxJavaTest {
@Test
public void just() {
Flowable.just(1)
.throttleLatest(1, TimeUnit.MINUTES)
.test()
.assertResult(1);
}
@Test
public void range() {
Flowable.range(1, 5)
.throttleLatest(1, TimeUnit.MINUTES)
.test()
.assertResult(1);
}
@Test
public void rangeEmitLatest() {
Flowable.range(1, 5)
.throttleLatest(1, TimeUnit.MINUTES, true)
.test()
.assertResult(1, 5);
}
@Test
public void error() {
Flowable.error(new TestException())
.throttleLatest(1, TimeUnit.MINUTES)
.test()
.assertFailure(TestException.class);
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeFlowable(new Function<Flowable<Object>, Publisher<Object>>() {
@Override
public Publisher<Object> apply(Flowable<Object> f) throws Exception {
return f.throttleLatest(1, TimeUnit.MINUTES);
}
});
}
@Test
public void badRequest() {
TestHelper.assertBadRequestReported(
Flowable.never()
.throttleLatest(1, TimeUnit.MINUTES)
);
}
@Test
public void normal() {
TestScheduler sch = new TestScheduler();
PublishProcessor<Integer> pp = PublishProcessor.create();
TestSubscriber<Integer> ts = pp.throttleLatest(1, TimeUnit.SECONDS, sch).test();
pp.onNext(1);
ts.assertValuesOnly(1);
pp.onNext(2);
ts.assertValuesOnly(1);
pp.onNext(3);
ts.assertValuesOnly(1);
sch.advanceTimeBy(1, TimeUnit.SECONDS);
ts.assertValuesOnly(1, 3);
pp.onNext(4);
ts.assertValuesOnly(1, 3);
pp.onNext(5);
sch.advanceTimeBy(1, TimeUnit.SECONDS);
ts.assertValuesOnly(1, 3, 5);
sch.advanceTimeBy(1, TimeUnit.SECONDS);
ts.assertValuesOnly(1, 3, 5);
pp.onNext(6);
ts.assertValuesOnly(1, 3, 5, 6);
pp.onNext(7);
pp.onComplete();
ts.assertResult(1, 3, 5, 6);
sch.advanceTimeBy(1, TimeUnit.SECONDS);
ts.assertResult(1, 3, 5, 6);
}
@Test
public void normalEmitLast() {
TestScheduler sch = new TestScheduler();
PublishProcessor<Integer> pp = PublishProcessor.create();
TestSubscriber<Integer> ts = pp.throttleLatest(1, TimeUnit.SECONDS, sch, true).test();
pp.onNext(1);
ts.assertValuesOnly(1);
pp.onNext(2);
ts.assertValuesOnly(1);
pp.onNext(3);
ts.assertValuesOnly(1);
sch.advanceTimeBy(1, TimeUnit.SECONDS);
ts.assertValuesOnly(1, 3);
pp.onNext(4);
ts.assertValuesOnly(1, 3);
pp.onNext(5);
sch.advanceTimeBy(1, TimeUnit.SECONDS);
ts.assertValuesOnly(1, 3, 5);
sch.advanceTimeBy(1, TimeUnit.SECONDS);
ts.assertValuesOnly(1, 3, 5);
pp.onNext(6);
ts.assertValuesOnly(1, 3, 5, 6);
pp.onNext(7);
pp.onComplete();
ts.assertResult(1, 3, 5, 6, 7);
sch.advanceTimeBy(1, TimeUnit.SECONDS);
ts.assertResult(1, 3, 5, 6, 7);
}
@Test
public void missingBackpressureExceptionFirst() throws Throwable {
TestScheduler sch = new TestScheduler();
Action onCancel = mock(Action.class);
Flowable.just(1, 2)
.doOnCancel(onCancel)
.throttleLatest(1, TimeUnit.MINUTES, sch)
.test(0)
.assertFailure(MissingBackpressureException.class);
verify(onCancel).run();
}
@Test
public void missingBackpressureExceptionLatest() throws Throwable {
TestScheduler sch = new TestScheduler();
Action onCancel = mock(Action.class);
TestSubscriber<Integer> ts = Flowable.just(1, 2)
.concatWith(Flowable.<Integer>never())
.doOnCancel(onCancel)
.throttleLatest(1, TimeUnit.SECONDS, sch, true)
.test(1);
sch.advanceTimeBy(1, TimeUnit.SECONDS);
ts.assertFailure(MissingBackpressureException.class, 1);
verify(onCancel).run();
}
@Test
public void missingBackpressureExceptionLatestComplete() throws Throwable {
TestScheduler sch = new TestScheduler();
Action onCancel = mock(Action.class);
PublishProcessor<Integer> pp = PublishProcessor.create();
TestSubscriber<Integer> ts = pp
.doOnCancel(onCancel)
.throttleLatest(1, TimeUnit.SECONDS, sch, true)
.test(1);
pp.onNext(1);
pp.onNext(2);
ts.assertValuesOnly(1);
pp.onComplete();
ts.assertFailure(MissingBackpressureException.class, 1);
verify(onCancel, never()).run();
}
@Test
public void take() throws Throwable {
Action onCancel = mock(Action.class);
Flowable.range(1, 5)
.doOnCancel(onCancel)
.throttleLatest(1, TimeUnit.MINUTES)
.take(1)
.test()
.assertResult(1);
verify(onCancel).run();
}
@Test
public void reentrantComplete() {
TestScheduler sch = new TestScheduler();
final PublishProcessor<Integer> pp = PublishProcessor.create();
TestSubscriber<Integer> ts = new TestSubscriber<Integer>() {
@Override
public void onNext(Integer t) {
super.onNext(t);
if (t == 1) {
pp.onNext(2);
}
if (t == 2) {
pp.onComplete();
}
}
};
pp.throttleLatest(1, TimeUnit.SECONDS, sch).subscribe(ts);
pp.onNext(1);
sch.advanceTimeBy(1, TimeUnit.SECONDS);
ts.assertResult(1, 2);
}
/** Emit 1, 2, 3, then advance time by a second; 1 and 3 should end up in downstream, 2 should be dropped. */
@Test
public void onDroppedBasicNoEmitLast() {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
TestSubscriber<Object> drops = new TestSubscriber<>();
drops.onSubscribe(EmptySubscription.INSTANCE);
TestSubscriber<Integer> ts = pp.throttleLatest(1, TimeUnit.SECONDS, sch, false, drops::onNext)
.test();
ts.assertEmpty();
drops.assertEmpty();
pp.onNext(1);
ts.assertValuesOnly(1);
drops.assertEmpty();
pp.onNext(2);
ts.assertValuesOnly(1);
drops.assertEmpty();
pp.onNext(3);
ts.assertValuesOnly(1);
drops.assertValuesOnly(2);
sch.advanceTimeBy(1, TimeUnit.SECONDS);
ts.assertValuesOnly(1, 3);
drops.assertValuesOnly(2);
pp.onComplete();
ts.assertResult(1, 3);
drops.assertValuesOnly(2);
}
/** Emit 1, 2, 3; 1 should end up in downstream, 2, 3 should be dropped. */
@Test
public void onDroppedBasicNoEmitLastDropLast() {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
TestSubscriber<Object> drops = new TestSubscriber<>();
drops.onSubscribe(EmptySubscription.INSTANCE);
TestSubscriber<Integer> ts = pp.throttleLatest(1, TimeUnit.SECONDS, sch, false, drops::onNext)
.test();
ts.assertEmpty();
drops.assertEmpty();
pp.onNext(1);
ts.assertValuesOnly(1);
drops.assertEmpty();
pp.onNext(2);
ts.assertValuesOnly(1);
drops.assertEmpty();
pp.onNext(3);
ts.assertValuesOnly(1);
drops.assertValuesOnly(2);
pp.onComplete();
ts.assertResult(1);
drops.assertValuesOnly(2, 3);
}
/** Emit 1, 2, 3; 1 and 3 should end up in downstream, 2 should be dropped. */
@Test
public void onDroppedBasicEmitLast() {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
TestSubscriber<Object> drops = new TestSubscriber<>();
drops.onSubscribe(EmptySubscription.INSTANCE);
TestSubscriber<Integer> ts = pp.throttleLatest(1, TimeUnit.SECONDS, sch, true, drops::onNext)
.test();
ts.assertEmpty();
drops.assertEmpty();
pp.onNext(1);
ts.assertValuesOnly(1);
drops.assertEmpty();
pp.onNext(2);
ts.assertValuesOnly(1);
drops.assertEmpty();
pp.onNext(3);
ts.assertValuesOnly(1);
drops.assertValuesOnly(2);
pp.onComplete();
ts.assertResult(1, 3);
drops.assertValuesOnly(2);
}
/** Emit 1, 2, 3; 3 should trigger an error to the downstream because 2 is dropped and the callback crashes. */
@Test
public void onDroppedBasicNoEmitLastFirstDropCrash() throws Throwable {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
Action whenDisposed = mock(Action.class);
TestSubscriber<Integer> ts = pp
.doOnCancel(whenDisposed)
.throttleLatest(1, TimeUnit.SECONDS, sch, false, d -> {
if (d == 2) {
throw new TestException("forced");
}
})
.test();
ts.assertEmpty();
pp.onNext(1);
ts.assertValuesOnly(1);
pp.onNext(2);
ts.assertValuesOnly(1);
pp.onNext(3);
ts.assertFailure(TestException.class, 1);
verify(whenDisposed).run();
}
/**
* Emit 1, 2, Error; the error should trigger the drop callback and crash it too,
* downstream gets 1, composite(source, drop-crash).
*/
@Test
public void onDroppedBasicNoEmitLastOnErrorDropCrash() throws Throwable {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
Action whenDisposed = mock(Action.class);
TestSubscriberEx<Integer> ts = pp
.doOnCancel(whenDisposed)
.throttleLatest(1, TimeUnit.SECONDS, sch, false, d -> { throw new TestException("forced " + d); })
.subscribeWith(new TestSubscriberEx<>());
ts.assertEmpty();
pp.onNext(1);
ts.assertValuesOnly(1);
pp.onNext(2);
ts.assertValuesOnly(1);
pp.onError(new TestException("source"));
ts.assertFailure(CompositeException.class, 1);
TestHelper.assertCompositeExceptions(ts, TestException.class, "source", TestException.class, "forced 2");
verify(whenDisposed, never()).run();
}
/**
* Emit 1, 2, 3; 3 should trigger a drop-crash for 2, which then would trigger the error path and drop-crash for 3,
* the last item not delivered, downstream gets 1, composite(drop-crash 2, drop-crash 3).
*/
@Test
public void onDroppedBasicEmitLastOnErrorDropCrash() throws Throwable {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
Action whenDisposed = mock(Action.class);
TestSubscriberEx<Integer> ts = pp
.doOnCancel(whenDisposed)
.throttleLatest(1, TimeUnit.SECONDS, sch, true, d -> { throw new TestException("forced " + d); })
.subscribeWith(new TestSubscriberEx<>());
ts.assertEmpty();
pp.onNext(1);
ts.assertValuesOnly(1);
pp.onNext(2);
ts.assertValuesOnly(1);
pp.onNext(3);
ts.assertFailure(CompositeException.class, 1);
TestHelper.assertCompositeExceptions(ts, TestException.class, "forced 2", TestException.class, "forced 3");
verify(whenDisposed).run();
}
/** Emit 1, complete; Downstream gets 1, complete, no drops. */
@Test
public void onDroppedBasicNoEmitLastNoLastToDrop() {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
TestSubscriber<Object> drops = new TestSubscriber<>();
drops.onSubscribe(EmptySubscription.INSTANCE);
TestSubscriber<Integer> ts = pp.throttleLatest(1, TimeUnit.SECONDS, sch, false, drops::onNext)
.test();
ts.assertEmpty();
drops.assertEmpty();
pp.onNext(1);
ts.assertValuesOnly(1);
drops.assertEmpty();
pp.onComplete();
ts.assertResult(1);
drops.assertEmpty();
}
/** Emit 1, error; Downstream gets 1, error, no drops. */
@Test
public void onDroppedErrorNoEmitLastNoLastToDrop() {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
TestSubscriber<Object> drops = new TestSubscriber<>();
drops.onSubscribe(EmptySubscription.INSTANCE);
TestSubscriber<Integer> ts = pp.throttleLatest(1, TimeUnit.SECONDS, sch, false, drops::onNext)
.test();
ts.assertEmpty();
drops.assertEmpty();
pp.onNext(1);
ts.assertValuesOnly(1);
drops.assertEmpty();
pp.onError(new TestException());
ts.assertFailure(TestException.class, 1);
drops.assertEmpty();
}
/**
* Emit 1, 2, complete; complete should crash drop, downstream gets 1, drop-crash 2.
*/
@Test
public void onDroppedHasLastNoEmitLastDropCrash() throws Throwable {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
Action whenDisposed = mock(Action.class);
TestSubscriberEx<Integer> ts = pp
.doOnCancel(whenDisposed)
.throttleLatest(1, TimeUnit.SECONDS, sch, false, d -> { throw new TestException("forced " + d); })
.subscribeWith(new TestSubscriberEx<>());
ts.assertEmpty();
pp.onNext(1);
ts.assertValuesOnly(1);
pp.onNext(2);
ts.assertValuesOnly(1);
pp.onComplete();
ts.assertFailureAndMessage(TestException.class, "forced 2", 1);
verify(whenDisposed, never()).run();
}
/**
* Emit 1, 2 then dispose the sequence; downstream gets 1, drop should get for 2.
*/
@Test
public void onDroppedDisposeDrops() throws Throwable {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
Action whenDisposed = mock(Action.class);
TestSubscriber<Object> drops = new TestSubscriber<>();
drops.onSubscribe(EmptySubscription.INSTANCE);
TestSubscriberEx<Integer> ts = pp
.doOnCancel(whenDisposed)
.throttleLatest(1, TimeUnit.SECONDS, sch, false, drops::onNext)
.subscribeWith(new TestSubscriberEx<>());
ts.assertEmpty();
pp.onNext(1);
ts.assertValuesOnly(1);
pp.onNext(2);
ts.assertValuesOnly(1);
ts.cancel();
ts.assertValuesOnly(1);
drops.assertValuesOnly(2);
verify(whenDisposed).run();
}
/**
* Emit 1 then dispose the sequence; downstream gets 1, drop should not get called.
*/
@Test
public void onDroppedDisposeNoDrops() throws Throwable {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
Action whenDisposed = mock(Action.class);
TestSubscriber<Object> drops = new TestSubscriber<>();
drops.onSubscribe(EmptySubscription.INSTANCE);
TestSubscriberEx<Integer> ts = pp
.doOnCancel(whenDisposed)
.throttleLatest(1, TimeUnit.SECONDS, sch, false, drops::onNext)
.subscribeWith(new TestSubscriberEx<>());
ts.assertEmpty();
pp.onNext(1);
ts.assertValuesOnly(1);
ts.cancel();
ts.assertValuesOnly(1);
drops.assertEmpty();
verify(whenDisposed).run();
}
/**
* Emit 1, 2 then dispose the sequence; downstream gets 1, global error handler should get drop-crash 2.
*/
@Test
public void onDroppedDisposeCrashesDrop() throws Throwable {
TestHelper.withErrorTracking(errors -> {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
Action whenDisposed = mock(Action.class);
TestSubscriberEx<Integer> ts = pp
.doOnCancel(whenDisposed)
.throttleLatest(1, TimeUnit.SECONDS, sch, false, d -> { throw new TestException("forced " + d); })
.subscribeWith(new TestSubscriberEx<>());
ts.assertEmpty();
pp.onNext(1);
ts.assertValuesOnly(1);
pp.onNext(2);
ts.assertValuesOnly(1);
ts.cancel();
ts.assertValuesOnly(1);
verify(whenDisposed).run();
TestHelper.assertUndeliverable(errors, 0, TestException.class, "forced 2");
});
}
/** Emit 1 but downstream is backpressured; downstream gets MBE, drops gets 1. */
@Test
public void onDroppedBackpressured() throws Throwable {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
TestSubscriber<Object> drops = new TestSubscriber<>();
drops.onSubscribe(EmptySubscription.INSTANCE);
Action whenDisposed = mock(Action.class);
TestSubscriber<Integer> ts = pp
.doOnCancel(whenDisposed)
.throttleLatest(1, TimeUnit.SECONDS, sch, false, drops::onNext)
.test(0L);
ts.assertEmpty();
drops.assertEmpty();
pp.onNext(1);
ts.assertFailure(MissingBackpressureException.class);
drops.assertValuesOnly(1);
verify(whenDisposed).run();
}
/** Emit 1 but downstream is backpressured; drop crashes, downstream gets composite(MBE, drop-crash 1). */
@Test
public void onDroppedBackpressuredDropCrash() throws Throwable {
PublishProcessor<Integer> pp =PublishProcessor.create();
TestScheduler sch = new TestScheduler();
Action whenDisposed = mock(Action.class);
TestSubscriberEx<Integer> ts = pp
.doOnCancel(whenDisposed)
.throttleLatest(1, TimeUnit.SECONDS, sch, false, d -> { throw new TestException("forced " + d); })
.subscribeWith(new TestSubscriberEx<>(0L));
ts.assertEmpty();
pp.onNext(1);
ts.assertFailure(CompositeException.class);
TestHelper.assertCompositeExceptions(ts,
MissingBackpressureException.class, "Could not emit value due to lack of requests",
TestException.class, "forced 1");
verify(whenDisposed).run();
}
}
| FlowableThrottleLatestTest |
java | quarkusio__quarkus | integration-tests/maven/src/test/resources-filtered/projects/extension-removed-resources/runner/src/test/java/org/acme/MetaInfResourceIT.java | {
"start": 104,
"end": 211
} | class ____ extends MetaInfResourceTest {
// Execute the same tests but in native mode.
}
| MetaInfResourceIT |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentBaseResult.java | {
"start": 1164,
"end": 4130
} | class ____ implements SimulateDocumentResult {
private final WriteableIngestDocument ingestDocument;
private final Exception failure;
public static final ConstructingObjectParser<SimulateDocumentBaseResult, Void> PARSER = new ConstructingObjectParser<>(
"simulate_document_base_result",
true,
a -> {
if (a[1] == null) {
assert a[0] != null;
return new SimulateDocumentBaseResult(((WriteableIngestDocument) a[0]).getIngestDocument());
} else {
assert a[0] == null;
return new SimulateDocumentBaseResult((ElasticsearchException) a[1]);
}
}
);
static {
PARSER.declareObject(
optionalConstructorArg(),
WriteableIngestDocument.INGEST_DOC_PARSER,
new ParseField(WriteableIngestDocument.DOC_FIELD)
);
PARSER.declareObject(optionalConstructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), new ParseField("error"));
}
public SimulateDocumentBaseResult(IngestDocument ingestDocument) {
Exception failure = null;
WriteableIngestDocument wid = null;
if (ingestDocument != null) {
try {
wid = new WriteableIngestDocument(ingestDocument);
} catch (Exception ex) {
failure = ex;
}
}
this.ingestDocument = wid;
this.failure = failure;
}
public SimulateDocumentBaseResult(Exception failure) {
this.ingestDocument = null;
this.failure = failure;
}
/**
* Read from a stream.
*/
public SimulateDocumentBaseResult(StreamInput in) throws IOException {
failure = in.readException();
ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeException(failure);
out.writeOptionalWriteable(ingestDocument);
}
public IngestDocument getIngestDocument() {
if (ingestDocument == null) {
return null;
}
return ingestDocument.getIngestDocument();
}
public Exception getFailure() {
return failure;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (failure == null && ingestDocument == null) {
builder.nullValue();
return builder;
}
builder.startObject();
if (failure == null) {
ingestDocument.toXContent(builder, params);
} else {
ElasticsearchException.generateFailureXContent(builder, params, failure, true);
}
builder.endObject();
return builder;
}
public static SimulateDocumentBaseResult fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
}
| SimulateDocumentBaseResult |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/processor/internals/AbstractProcessorContextTest.java | {
"start": 2701,
"end": 7137
} | class ____ {
private final MockStreamsMetrics metrics = new MockStreamsMetrics(new Metrics());
private final AbstractProcessorContext<?, ?> context = new TestProcessorContext(metrics);
private final MockKeyValueStore stateStore = new MockKeyValueStore("store", false);
private final Headers headers = new RecordHeaders(new Header[]{new RecordHeader("key", "value".getBytes())});
private final ProcessorRecordContext recordContext = new ProcessorRecordContext(10, System.currentTimeMillis(), 1, "foo", headers);
@BeforeEach
public void before() {
context.setRecordContext(recordContext);
}
@Test
public void shouldThrowIllegalStateExceptionOnRegisterWhenContextIsInitialized() {
context.initialize();
try {
context.register(stateStore, null);
fail("should throw illegal state exception when context already initialized");
} catch (final IllegalStateException e) {
// pass
}
}
@Test
public void shouldNotThrowIllegalStateExceptionOnRegisterWhenContextIsNotInitialized() {
context.register(stateStore, null);
}
@Test
public void shouldThrowNullPointerOnRegisterIfStateStoreIsNull() {
assertThrows(NullPointerException.class, () -> context.register(null, null));
}
@Test
public void shouldReturnNullTopicIfNoRecordContext() {
context.setRecordContext(null);
assertThat(context.topic(), is(nullValue()));
}
@Test
public void shouldNotThrowNullPointerExceptionOnTopicIfRecordContextTopicIsNull() {
context.setRecordContext(new ProcessorRecordContext(0, 0, 0, null, new RecordHeaders()));
assertThat(context.topic(), nullValue());
}
@Test
public void shouldReturnTopicFromRecordContext() {
assertThat(context.topic(), equalTo(recordContext.topic()));
}
@Test
public void shouldReturnNullIfTopicEqualsNonExistTopic() {
context.setRecordContext(null);
assertThat(context.topic(), nullValue());
}
@Test
public void shouldReturnDummyPartitionIfNoRecordContext() {
context.setRecordContext(null);
assertThat(context.partition(), is(-1));
}
@Test
public void shouldReturnPartitionFromRecordContext() {
assertThat(context.partition(), equalTo(recordContext.partition()));
}
@Test
public void shouldThrowIllegalStateExceptionOnOffsetIfNoRecordContext() {
context.setRecordContext(null);
try {
context.offset();
} catch (final IllegalStateException e) {
// pass
}
}
@Test
public void shouldReturnOffsetFromRecordContext() {
assertThat(context.offset(), equalTo(recordContext.offset()));
}
@Test
public void shouldReturnDummyTimestampIfNoRecordContext() {
context.setRecordContext(null);
assertThat(context.timestamp(), is(0L));
}
@Test
public void shouldReturnTimestampFromRecordContext() {
assertThat(context.timestamp(), equalTo(recordContext.timestamp()));
}
@Test
public void shouldReturnHeadersFromRecordContext() {
assertThat(context.headers(), equalTo(recordContext.headers()));
}
@Test
public void shouldReturnEmptyHeadersIfHeadersAreNotSet() {
context.setRecordContext(null);
assertThat(context.headers(), is(emptyIterable()));
}
@Test
public void appConfigsShouldReturnParsedValues() {
assertThat(
context.appConfigs().get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG),
equalTo(RocksDBConfigSetter.class)
);
}
@Test
public void appConfigsShouldReturnUnrecognizedValues() {
assertThat(
context.appConfigs().get("user.supplied.config"),
equalTo("user-supplied-value")
);
}
@Test
public void shouldThrowErrorIfSerdeDefaultNotSet() {
final Properties config = getStreamsConfig();
config.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, RocksDBConfigSetter.class.getName());
config.put("user.supplied.config", "user-supplied-value");
final TestProcessorContext pc = new TestProcessorContext(metrics, config);
assertThrows(ConfigException.class, pc::keySerde);
assertThrows(ConfigException.class, pc::valueSerde);
}
private static | AbstractProcessorContextTest |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/Logger.java | {
"start": 1955,
"end": 2222
} | class ____
* than sharing Loggers. Instead, {@link Marker Markers} should be used for shared, filterable identification.
* </p>
* <p>
* For service provider implementations, it is recommended to extend the
* {@link org.apache.logging.log4j.spi.AbstractLogger} | rather |
java | netty__netty | transport/src/main/java/io/netty/channel/pool/SimpleChannelPool.java | {
"start": 1621,
"end": 17301
} | class ____ implements ChannelPool {
private static final AttributeKey<SimpleChannelPool> POOL_KEY =
AttributeKey.newInstance("io.netty.channel.pool.SimpleChannelPool");
private final Deque<Channel> deque = PlatformDependent.newConcurrentDeque();
private final ChannelPoolHandler handler;
private final ChannelHealthChecker healthCheck;
private final Bootstrap bootstrap;
private final boolean releaseHealthCheck;
private final boolean lastRecentUsed;
/**
* Creates a new instance using the {@link ChannelHealthChecker#ACTIVE}.
*
* @param bootstrap the {@link Bootstrap} that is used for connections
* @param handler the {@link ChannelPoolHandler} that will be notified for the different pool actions
*/
public SimpleChannelPool(Bootstrap bootstrap, final ChannelPoolHandler handler) {
this(bootstrap, handler, ChannelHealthChecker.ACTIVE);
}
/**
* Creates a new instance.
*
* @param bootstrap the {@link Bootstrap} that is used for connections
* @param handler the {@link ChannelPoolHandler} that will be notified for the different pool actions
* @param healthCheck the {@link ChannelHealthChecker} that will be used to check if a {@link Channel} is
* still healthy when obtain from the {@link ChannelPool}
*/
public SimpleChannelPool(Bootstrap bootstrap, final ChannelPoolHandler handler, ChannelHealthChecker healthCheck) {
this(bootstrap, handler, healthCheck, true);
}
/**
* Creates a new instance.
*
* @param bootstrap the {@link Bootstrap} that is used for connections
* @param handler the {@link ChannelPoolHandler} that will be notified for the different pool actions
* @param healthCheck the {@link ChannelHealthChecker} that will be used to check if a {@link Channel} is
* still healthy when obtain from the {@link ChannelPool}
* @param releaseHealthCheck will check channel health before offering back if this parameter set to {@code true};
* otherwise, channel health is only checked at acquisition time
*/
public SimpleChannelPool(Bootstrap bootstrap, final ChannelPoolHandler handler, ChannelHealthChecker healthCheck,
boolean releaseHealthCheck) {
this(bootstrap, handler, healthCheck, releaseHealthCheck, true);
}
/**
* Creates a new instance.
*
* @param bootstrap the {@link Bootstrap} that is used for connections
* @param handler the {@link ChannelPoolHandler} that will be notified for the different pool actions
* @param healthCheck the {@link ChannelHealthChecker} that will be used to check if a {@link Channel} is
* still healthy when obtain from the {@link ChannelPool}
* @param releaseHealthCheck will check channel health before offering back if this parameter set to {@code true};
* otherwise, channel health is only checked at acquisition time
* @param lastRecentUsed {@code true} {@link Channel} selection will be LIFO, if {@code false} FIFO.
*/
public SimpleChannelPool(Bootstrap bootstrap, final ChannelPoolHandler handler, ChannelHealthChecker healthCheck,
boolean releaseHealthCheck, boolean lastRecentUsed) {
this.handler = checkNotNull(handler, "handler");
this.healthCheck = checkNotNull(healthCheck, "healthCheck");
this.releaseHealthCheck = releaseHealthCheck;
// Clone the original Bootstrap as we want to set our own handler
this.bootstrap = checkNotNull(bootstrap, "bootstrap").clone();
this.bootstrap.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
assert ch.eventLoop().inEventLoop();
handler.channelCreated(ch);
}
});
this.lastRecentUsed = lastRecentUsed;
}
/**
* Returns the {@link Bootstrap} this pool will use to open new connections.
*
* @return the {@link Bootstrap} this pool will use to open new connections
*/
protected Bootstrap bootstrap() {
return bootstrap;
}
/**
* Returns the {@link ChannelPoolHandler} that will be notified for the different pool actions.
*
* @return the {@link ChannelPoolHandler} that will be notified for the different pool actions
*/
protected ChannelPoolHandler handler() {
return handler;
}
/**
* Returns the {@link ChannelHealthChecker} that will be used to check if a {@link Channel} is healthy.
*
* @return the {@link ChannelHealthChecker} that will be used to check if a {@link Channel} is healthy
*/
protected ChannelHealthChecker healthChecker() {
return healthCheck;
}
/**
* Indicates whether this pool will check the health of channels before offering them back into the pool.
*
* @return {@code true} if this pool will check the health of channels before offering them back into the pool, or
* {@code false} if channel health is only checked at acquisition time
*/
protected boolean releaseHealthCheck() {
return releaseHealthCheck;
}
@Override
public final Future<Channel> acquire() {
return acquire(bootstrap.config().group().next().<Channel>newPromise());
}
@Override
public Future<Channel> acquire(final Promise<Channel> promise) {
return acquireHealthyFromPoolOrNew(checkNotNull(promise, "promise"));
}
/**
* Tries to retrieve healthy channel from the pool if any or creates a new channel otherwise.
* @param promise the promise to provide acquire result.
* @return future for acquiring a channel.
*/
private Future<Channel> acquireHealthyFromPoolOrNew(final Promise<Channel> promise) {
try {
final Channel ch = pollChannel();
if (ch == null) {
// No Channel left in the pool bootstrap a new Channel
Bootstrap bs = bootstrap.clone();
bs.attr(POOL_KEY, this);
ChannelFuture f = connectChannel(bs);
if (f.isDone()) {
notifyConnect(f, promise);
} else {
f.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
notifyConnect(future, promise);
}
});
}
} else {
EventLoop loop = ch.eventLoop();
if (loop.inEventLoop()) {
doHealthCheck(ch, promise);
} else {
loop.execute(new Runnable() {
@Override
public void run() {
doHealthCheck(ch, promise);
}
});
}
}
} catch (Throwable cause) {
promise.tryFailure(cause);
}
return promise;
}
private void notifyConnect(ChannelFuture future, Promise<Channel> promise) {
Channel channel = null;
try {
if (future.isSuccess()) {
channel = future.channel();
handler.channelAcquired(channel);
if (!promise.trySuccess(channel)) {
// Promise was completed in the meantime (like cancelled), just release the channel again
release(channel);
}
} else {
promise.tryFailure(future.cause());
}
} catch (Throwable cause) {
closeAndFail(channel, cause, promise);
}
}
private void doHealthCheck(final Channel channel, final Promise<Channel> promise) {
try {
assert channel.eventLoop().inEventLoop();
Future<Boolean> f = healthCheck.isHealthy(channel);
if (f.isDone()) {
notifyHealthCheck(f, channel, promise);
} else {
f.addListener(new FutureListener<Boolean>() {
@Override
public void operationComplete(Future<Boolean> future) {
notifyHealthCheck(future, channel, promise);
}
});
}
} catch (Throwable cause) {
closeAndFail(channel, cause, promise);
}
}
private void notifyHealthCheck(Future<Boolean> future, Channel channel, Promise<Channel> promise) {
try {
assert channel.eventLoop().inEventLoop();
if (future.isSuccess() && future.getNow()) {
channel.attr(POOL_KEY).set(this);
handler.channelAcquired(channel);
promise.setSuccess(channel);
} else {
closeChannel(channel);
acquireHealthyFromPoolOrNew(promise);
}
} catch (Throwable cause) {
closeAndFail(channel, cause, promise);
}
}
/**
* Bootstrap a new {@link Channel}. The default implementation uses {@link Bootstrap#connect()}, sub-classes may
* override this.
* <p>
* The {@link Bootstrap} that is passed in here is cloned via {@link Bootstrap#clone()}, so it is safe to modify.
*/
protected ChannelFuture connectChannel(Bootstrap bs) {
return bs.connect();
}
@Override
public final Future<Void> release(Channel channel) {
return release(channel, channel.eventLoop().<Void>newPromise());
}
@Override
public Future<Void> release(final Channel channel, final Promise<Void> promise) {
try {
checkNotNull(channel, "channel");
checkNotNull(promise, "promise");
EventLoop loop = channel.eventLoop();
if (loop.inEventLoop()) {
doReleaseChannel(channel, promise);
} else {
loop.execute(new Runnable() {
@Override
public void run() {
doReleaseChannel(channel, promise);
}
});
}
} catch (Throwable cause) {
closeAndFail(channel, cause, promise);
}
return promise;
}
private void doReleaseChannel(Channel channel, Promise<Void> promise) {
try {
assert channel.eventLoop().inEventLoop();
// Remove the POOL_KEY attribute from the Channel and check if it was acquired from this pool, if not fail.
if (channel.attr(POOL_KEY).getAndSet(null) != this) {
closeAndFail(channel,
// Better include a stacktrace here as this is an user error.
new IllegalArgumentException(
"Channel " + channel + " was not acquired from this ChannelPool"),
promise);
} else {
if (releaseHealthCheck) {
doHealthCheckOnRelease(channel, promise);
} else {
releaseAndOffer(channel, promise);
}
}
} catch (Throwable cause) {
closeAndFail(channel, cause, promise);
}
}
private void doHealthCheckOnRelease(final Channel channel, final Promise<Void> promise) throws Exception {
final Future<Boolean> f = healthCheck.isHealthy(channel);
if (f.isDone()) {
releaseAndOfferIfHealthy(channel, promise, f);
} else {
f.addListener(new FutureListener<Boolean>() {
@Override
public void operationComplete(Future<Boolean> future) throws Exception {
releaseAndOfferIfHealthy(channel, promise, f);
}
});
}
}
/**
* Adds the channel back to the pool only if the channel is healthy.
* @param channel the channel to put back to the pool
* @param promise offer operation promise.
* @param future the future that contains information fif channel is healthy or not.
* @throws Exception in case when failed to notify handler about release operation.
*/
private void releaseAndOfferIfHealthy(Channel channel, Promise<Void> promise, Future<Boolean> future) {
try {
if (future.getNow()) { //channel turns out to be healthy, offering and releasing it.
releaseAndOffer(channel, promise);
} else { //channel not healthy, just releasing it.
handler.channelReleased(channel);
promise.setSuccess(null);
}
} catch (Throwable cause) {
closeAndFail(channel, cause, promise);
}
}
private void releaseAndOffer(Channel channel, Promise<Void> promise) throws Exception {
if (offerChannel(channel)) {
handler.channelReleased(channel);
promise.setSuccess(null);
} else {
closeAndFail(channel, new ChannelPoolFullException(), promise);
}
}
private void closeChannel(Channel channel) throws Exception {
channel.attr(POOL_KEY).getAndSet(null);
channel.close();
}
private void closeAndFail(Channel channel, Throwable cause, Promise<?> promise) {
if (channel != null) {
try {
closeChannel(channel);
} catch (Throwable t) {
promise.tryFailure(t);
}
}
promise.tryFailure(cause);
}
/**
* Poll a {@link Channel} out of the internal storage to reuse it. This will return {@code null} if no
* {@link Channel} is ready to be reused.
*
* Sub-classes may override {@link #pollChannel()} and {@link #offerChannel(Channel)}. Be aware that
* implementations of these methods needs to be thread-safe!
*/
protected Channel pollChannel() {
return lastRecentUsed ? deque.pollLast() : deque.pollFirst();
}
/**
* Offer a {@link Channel} back to the internal storage. This will return {@code true} if the {@link Channel}
* could be added, {@code false} otherwise.
*
* Sub-classes may override {@link #pollChannel()} and {@link #offerChannel(Channel)}. Be aware that
* implementations of these methods needs to be thread-safe!
*/
protected boolean offerChannel(Channel channel) {
return deque.offer(channel);
}
@Override
public void close() {
for (;;) {
Channel channel = pollChannel();
if (channel == null) {
break;
}
// Just ignore any errors that are reported back from close().
channel.close().awaitUninterruptibly();
}
}
/**
* Closes the pool in an async manner.
*
* @return Future which represents completion of the close task
*/
public Future<Void> closeAsync() {
// Execute close asynchronously in case this is being invoked on an eventloop to avoid blocking
return GlobalEventExecutor.INSTANCE.submit(new Callable<Void>() {
@Override
public Void call() throws Exception {
close();
return null;
}
});
}
private static final | SimpleChannelPool |
java | micronaut-projects__micronaut-core | context/src/main/java/io/micronaut/runtime/context/env/ConfigurationAdvice.java | {
"start": 1069,
"end": 1431
} | interface ____ {
/**
* @return Is the method call a bean lookup
*/
boolean bean() default false;
/**
* @return Is it annotated with {@link io.micronaut.context.annotation.EachProperty}
*/
boolean iterable() default false;
/**
* @return The property to lookup
*/
String value() default "";
}
| ConfigurationAdvice |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/controller/v3/MetricsControllerV3.java | {
"start": 6138,
"end": 9827
} | class ____ implements Callback<Map> {
Map<String, Object> responseMap;
CountDownLatch latch;
String dataId;
String group;
String namespaceId;
String ip;
Member member;
public ClusterMetricsCallBack(Map<String, Object> responseMap, CountDownLatch latch, String dataId,
String group, String namespaceId, String ip, Member member) {
this.responseMap = responseMap;
this.latch = latch;
this.dataId = dataId;
this.group = group;
this.namespaceId = namespaceId;
this.member = member;
this.ip = ip;
}
@Override
public void onReceive(RestResult<Map> result) {
if (result.ok()) {
responseMap.putAll(result.getData());
}
latch.countDown();
}
@Override
public void onError(Throwable throwable) {
Loggers.CORE.error(
"Get config metrics error from member address={}, ip={},dataId={},group={},namespaceId={},error={}",
member.getAddress(), ip, dataId, group, namespaceId, throwable);
latch.countDown();
}
@Override
public void onCancel() {
latch.countDown();
}
}
/**
* Get client config listener lists of subscriber in local machine.
*/
@GetMapping("/ip")
@Secured(resource = Constants.METRICS_CONTROLLER_V3_ADMIN_PATH, action = ActionTypes.READ,
signType = SignType.CONFIG, apiType = ApiType.ADMIN_API)
public Result<Map<String, Object>> getClientMetrics(@RequestParam("ip") String ip,
@RequestParam(value = "dataId", required = false) String dataId,
@RequestParam(value = "groupName", required = false) String groupName,
@RequestParam(value = "namespaceId", required = false) String namespaceId) throws NacosException {
ParamUtils.checkTenant(namespaceId);
namespaceId = NamespaceUtil.processNamespaceParameter(namespaceId);
ParamUtils.checkParam(dataId, groupName, "default", "default");
Map<String, Object> metrics = new HashMap<>(16);
List<Connection> connectionsByIp = connectionManager.getConnectionByIp(ip);
for (Connection connectionByIp : connectionsByIp) {
try {
ClientConfigMetricRequest clientMetrics = new ClientConfigMetricRequest();
if (StringUtils.isNotBlank(dataId)) {
clientMetrics.getMetricsKeys().add(ClientConfigMetricRequest.MetricsKey.build(CACHE_DATA,
GroupKey2.getKey(dataId, groupName, namespaceId)));
clientMetrics.getMetricsKeys().add(ClientConfigMetricRequest.MetricsKey.build(SNAPSHOT_DATA,
GroupKey2.getKey(dataId, groupName, namespaceId)));
}
ClientConfigMetricResponse request1 = (ClientConfigMetricResponse) connectionByIp.request(clientMetrics,
1000L);
metrics.putAll(request1.getMetrics());
} catch (Exception e) {
Loggers.CORE.error(
"Get config metrics error from client ip={},dataId={},groupName={},namespaceId={},error={}", ip,
dataId, groupName, namespaceId, e);
throw new NacosException(NacosException.SERVER_ERROR, e);
}
}
return Result.success(metrics);
}
} | ClusterMetricsCallBack |
java | dropwizard__dropwizard | dropwizard-client/src/main/java/io/dropwizard/client/proxy/NonProxyListProxyRoutePlanner.java | {
"start": 662,
"end": 2445
} | class ____ extends DefaultProxyRoutePlanner {
private static final Pattern WILDCARD = Pattern.compile("\\*");
private static final String REGEX_WILDCARD = ".*";
private List<Pattern> nonProxyHostPatterns;
public NonProxyListProxyRoutePlanner(HttpHost proxy, @Nullable List<String> nonProxyHosts) {
super(proxy, null);
nonProxyHostPatterns = getNonProxyHostPatterns(nonProxyHosts);
}
public NonProxyListProxyRoutePlanner(HttpHost proxy, SchemePortResolver schemePortResolver,
@Nullable List<String> nonProxyHosts) {
super(proxy, schemePortResolver);
this.nonProxyHostPatterns = getNonProxyHostPatterns(nonProxyHosts);
}
private List<Pattern> getNonProxyHostPatterns(@Nullable List<String> nonProxyHosts) {
if (nonProxyHosts == null) {
return Collections.emptyList();
}
final List<Pattern> patterns = new ArrayList<>(nonProxyHosts.size());
for (String nonProxyHost : nonProxyHosts) {
// Replaces a wildcard to a regular expression
patterns.add(Pattern.compile(WILDCARD.matcher(nonProxyHost).replaceAll(REGEX_WILDCARD)));
}
return Collections.unmodifiableList(patterns);
}
protected List<Pattern> getNonProxyHostPatterns() {
return nonProxyHostPatterns;
}
@Override
@Nullable
protected HttpHost determineProxy(HttpHost target, HttpContext context) throws HttpException {
for (Pattern nonProxyHostPattern : nonProxyHostPatterns) {
if (nonProxyHostPattern.matcher(target.getHostName()).matches()) {
return null;
}
}
return super.determineProxy(target, context);
}
}
| NonProxyListProxyRoutePlanner |
java | quarkusio__quarkus | extensions/amazon-lambda/maven-archetype/src/main/resources/archetype-resources/src/main/java/StreamLambda.java | {
"start": 284,
"end": 666
} | class ____ implements RequestStreamHandler {
@Override
public void handleRequest(InputStream inputStream, OutputStream outputStream, Context context) throws IOException {
int letter;
while ((letter = inputStream.read()) != -1) {
int character = Character.toUpperCase(letter);
outputStream.write(character);
}
}
}
| StreamLambda |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/event/spi/PersistEventListener.java | {
"start": 290,
"end": 681
} | interface ____ {
/**
* Handle the given create event.
*
* @param event The create event to be handled.
*/
void onPersist(PersistEvent event) throws HibernateException;
/**
* Handle the given create event.
*
* @param event The create event to be handled.
*/
void onPersist(PersistEvent event, PersistContext createdAlready) throws HibernateException;
}
| PersistEventListener |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/stages/TaskAttemptScanDirectoryStage.java | {
"start": 2677,
"end": 8536
} | class ____
extends AbstractJobOrTaskStage<Void, TaskManifest> {
private static final Logger LOG = LoggerFactory.getLogger(
TaskAttemptScanDirectoryStage.class);
public TaskAttemptScanDirectoryStage(
final StageConfig stageConfig) {
super(true, stageConfig, OP_STAGE_TASK_SCAN_DIRECTORY, false);
}
/**
* Build the Manifest.
* @return the manifest
* @throws IOException failure.
*/
@Override
protected TaskManifest executeStage(final Void arguments)
throws IOException {
final Path taskAttemptDir = getRequiredTaskAttemptDir();
final TaskManifest manifest = createTaskManifest(getStageConfig());
LOG.info("{}: scanning directory {}",
getName(), taskAttemptDir);
final int depth = scanDirectoryTree(manifest,
taskAttemptDir,
getDestinationDir(),
0, true);
List<FileEntry> filesToCommit = manifest.getFilesToCommit();
LongSummaryStatistics fileSummary = filesToCommit.stream()
.mapToLong(FileEntry::getSize)
.summaryStatistics();
long fileDataSize = fileSummary.getSum();
long fileCount = fileSummary.getCount();
int dirCount = manifest.getDestDirectories().size();
LOG.info("{}: directory {} contained {} file(s); data size {}",
getName(),
taskAttemptDir,
fileCount,
fileDataSize);
LOG.info("{}: Directory count = {}; maximum depth {}",
getName(),
dirCount,
depth);
// add statistics about the task output which, when aggregated, provides
// insight into structure of job, task skew, etc.
IOStatisticsStore iostats = getIOStatistics();
iostats.addSample(COMMITTER_TASK_DIRECTORY_COUNT_MEAN, dirCount);
iostats.addSample(COMMITTER_TASK_DIRECTORY_DEPTH_MEAN, depth);
iostats.addSample(COMMITTER_TASK_FILE_COUNT_MEAN, fileCount);
iostats.addSample(COMMITTER_TASK_FILE_SIZE_MEAN, fileDataSize);
return manifest;
}
/**
* Recursively scan a directory tree.
* The manifest will contain all files to rename
* (source and dest) and directories to create.
* All files are processed before any of the subdirs are.
* This helps in statistics gathering.
* There's some optimizations which could be done with async
* fetching of the iterators of those subdirs, but as this
* is generally off-critical path then that "enhancement"
* can be postponed until data suggests this needs improvement.
* @param manifest manifest to update
* @param srcDir dir to scan
* @param destDir destination directory
* @param depth depth from the task attempt dir.
* @param parentDirExists does the parent dir exist?
* @return the maximum depth of child directories
* @throws IOException IO failure.
*/
private int scanDirectoryTree(
TaskManifest manifest,
Path srcDir,
Path destDir,
int depth,
boolean parentDirExists) throws IOException {
// generate some task progress in case directory scanning is very slow.
progress();
int maxDepth = 0;
int files = 0;
boolean dirExists = parentDirExists;
List<FileStatus> subdirs = new ArrayList<>();
try (DurationInfo ignored = new DurationInfo(LOG, false,
"Task Attempt %s source dir %s, dest dir %s",
getTaskAttemptId(), srcDir, destDir)) {
// list the directory. This may block until the listing is complete,
// or, if the FS does incremental or asynchronous fetching,
// then the next()/hasNext() call will block for the results
// unless turned off, ABFS does to this async
final RemoteIterator<FileStatus> listing = listStatusIterator(srcDir);
// when the FS (especially ABFS) does an asyn fetch of the listing,
// we can probe for the status of the destination dir while that
// page is being fetched.
// probe for and add the dest dir entry for all but
// the base dir
if (depth > 0) {
final EntryStatus status;
if (parentDirExists) {
final FileStatus destDirStatus = getFileStatusOrNull(destDir);
status = EntryStatus.toEntryStatus(destDirStatus);
dirExists = destDirStatus != null;
} else {
// if there is no parent dir, then there is no need to look
// for this directory -report it as missing automatically.
status = EntryStatus.not_found;
}
manifest.addDirectory(DirEntry.dirEntry(
destDir,
status,
depth));
}
// process the listing; this is where abfs will block
// to wait the result of the list call.
while (listing.hasNext()) {
final FileStatus st = listing.next();
if (st.isFile()) {
// this is a file, so add to the list of files to commit.
files++;
final FileEntry entry = fileEntry(st, destDir);
manifest.addFileToCommit(entry);
LOG.debug("To rename: {}", entry);
} else {
if (st.isDirectory()) {
// will need to scan this directory too.
subdirs.add(st);
} else {
// some other object. ignoring
LOG.info("Ignoring FS object {}", st);
}
}
}
// add any statistics provided by the listing.
maybeAddIOStatistics(getIOStatistics(), listing);
}
// now scan the subdirectories
LOG.debug("{}: Number of subdirectories under {} found: {}; file count {}",
getName(), srcDir, subdirs.size(), files);
for (FileStatus st : subdirs) {
Path destSubDir = new Path(destDir, st.getPath().getName());
final int d = scanDirectoryTree(manifest,
st.getPath(),
destSubDir,
depth + 1,
dirExists);
maxDepth = Math.max(maxDepth, d);
}
return 1 + maxDepth;
}
}
| TaskAttemptScanDirectoryStage |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/onetoone/JoinColumnTest.java | {
"start": 1105,
"end": 1368
} | class ____ implements Serializable {
@Column
String id_one_2;
@Column
String id_two_2;
@Column
String id_three_2;
}
@EmbeddedId
Pk id;
@OneToOne(mappedBy = "aLeftEntity")
MidEntity midEntity;
}
@Entity(name = "MidEntity")
static | Pk |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java | {
"start": 1259,
"end": 3053
} | class ____ extends ESIntegTestCase {
/**
* Test that searches using cardinality aggregations returns all request breaker memory.
*/
public void testRequestBreaker() throws Exception {
final String requestBreaker = randomIntBetween(1, 10000) + "kb";
logger.info("--> Using request breaker setting: {}", requestBreaker);
indexRandom(
true,
IntStream.range(0, randomIntBetween(10, 1000))
.mapToObj(
i -> prepareIndex("test").setId("id_" + i)
.setSource(Map.of("field0", randomAlphaOfLength(5), "field1", randomAlphaOfLength(5)))
)
.toArray(IndexRequestBuilder[]::new)
);
updateClusterSettings(
Settings.builder().put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), requestBreaker)
);
try {
prepareSearch("test").addAggregation(
terms("terms").field("field0.keyword")
.collectMode(randomFrom(Aggregator.SubAggCollectionMode.values()))
.order(BucketOrder.aggregation("cardinality", randomBoolean()))
.subAggregation(cardinality("cardinality").precisionThreshold(randomLongBetween(1, 40000)).field("field1.keyword"))
).get().decRef();
} catch (ElasticsearchException e) {
if (ExceptionsHelper.unwrap(e, CircuitBreakingException.class) == null) {
throw e;
}
}
updateClusterSettings(Settings.builder().putNull(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()));
// validation done by InternalTestCluster.ensureEstimatedStats()
}
}
| CardinalityWithRequestBreakerIT |
java | junit-team__junit5 | junit-vintage-engine/src/testFixtures/java/org/junit/vintage/engine/samples/junit4/EnclosedJUnit4TestCase.java | {
"start": 741,
"end": 955
} | class ____ {
@Test
@Category(Categories.Failing.class)
public void failingTest() {
fail("this test should fail");
}
@Test
public void successfulTest() {
assertEquals(3, 1 + 2);
}
}
}
| NestedClass |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/internal/entities/mapper/relation/query/QueryConstants.java | {
"start": 290,
"end": 877
} | class ____ {
private QueryConstants() {
}
public static final String REFERENCED_ENTITY_ALIAS = "e__";
public static final String REFERENCED_ENTITY_ALIAS_DEF_AUD_STR = "e2__";
public static final String INDEX_ENTITY_ALIAS = "f__";
public static final String INDEX_ENTITY_ALIAS_DEF_AUD_STR = "f2__";
public static final String MIDDLE_ENTITY_ALIAS = "ee__";
public static final String MIDDLE_ENTITY_ALIAS_DEF_AUD_STR = "ee2__";
public static final String REVISION_PARAMETER = "revision";
public static final String DEL_REVISION_TYPE_PARAMETER = "delrevisiontype";
}
| QueryConstants |
java | elastic__elasticsearch | build-tools/src/main/java/org/elasticsearch/gradle/util/PlatformUtils.java | {
"start": 553,
"end": 881
} | class ____ {
public static String normalize(String input) {
return input.lines()
.map(it -> it.replace('\\', '/'))
.map(it -> it.replaceAll("\\d+\\.\\d\\ds", "0.00s"))
.map(it -> it.replace("file:/./", "file:./"))
.collect(Collectors.joining("\n"));
}
}
| PlatformUtils |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/HttpClient5SslTests.java | {
"start": 18004,
"end": 18575
} | class ____<T> implements FutureCallback<T> {
private final ActionListener<T> listener;
ListenerCallbackAdapter(ActionListener<T> listener) {
this.listener = listener;
}
@Override
public void completed(T result) {
listener.onResponse(result);
}
@Override
public void failed(Exception ex) {
listener.onFailure(ex);
}
@Override
public void cancelled() {
failed(new RuntimeException("cancelled"));
}
}
}
| ListenerCallbackAdapter |
java | elastic__elasticsearch | modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java | {
"start": 1153,
"end": 2138
} | interface ____ {
/**
* Returns a {@link Map} containing all named capture groups if the
* string matches or {@code null} if it doesn't.
*/
Map<String, ?> extract(String in);
/**
* Create a {@link NamedGroupExtractor} that runs {@link DissectParser}
* with the default {@code appendSeparator}.
*/
static NamedGroupExtractor dissect(String pattern) {
return dissect(pattern, null);
}
/**
* Create a {@link NamedGroupExtractor} that runs {@link DissectParser}.
*/
static NamedGroupExtractor dissect(String pattern, String appendSeparator) {
DissectParser dissect = new DissectParser(pattern, appendSeparator);
return new NamedGroupExtractor() {
@Override
public Map<String, ?> extract(String in) {
return dissect.parse(in);
}
};
}
/**
* Builds {@link NamedGroupExtractor}s from grok patterns.
*/
| NamedGroupExtractor |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/Function4RaisingIOE.java | {
"start": 1093,
"end": 1411
} | interface ____<I1, I2, I3, I4, R> {
/**
* Apply the function.
* @param i1 argument 1.
* @param i2 argument 2.
* @param i3 argument 3.
* @param i4 argument 4.
* @return return value.
* @throws IOException any IOE.
*/
R apply(I1 i1, I2 i2, I3 i3, I4 i4) throws IOException;
}
| Function4RaisingIOE |
java | apache__flink | flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/pattern/conditions/SimpleCondition.java | {
"start": 1379,
"end": 1525
} | class ____
* simple {@code filter(...)} functions that decide based on the properties of the element at hand.
*/
@PublicEvolving
public abstract | are |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmConfig.java | {
"start": 8956,
"end": 11515
} | class ____ implements Writeable, ToXContentObject, Comparable<RealmIdentifier> {
private final String type;
private final String name;
public RealmIdentifier(String type, String name) {
this.type = Objects.requireNonNull(type, "Realm type cannot be null");
this.name = Objects.requireNonNull(name, "Realm name cannot be null");
}
public RealmIdentifier(StreamInput in) throws IOException {
this.type = in.readString();
this.name = in.readString();
}
public String getType() {
return type;
}
public String getName() {
return name;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null) {
return false;
}
if (getClass() != o.getClass()) {
return false;
}
final RealmIdentifier other = (RealmIdentifier) o;
return Objects.equals(this.type, other.type) && Objects.equals(this.name, other.name);
}
@Override
public int hashCode() {
return Objects.hash(type, name);
}
@Override
public String toString() {
return type + '/' + name;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(type);
out.writeString(name);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
{
builder.field("name", name);
builder.field("type", type);
}
builder.endObject();
return builder;
}
@Override
public int compareTo(RealmIdentifier other) {
int result = name.compareTo(other.name);
return (result == 0) ? type.compareTo(other.type) : result;
}
}
public static final ConstructingObjectParser<RealmIdentifier, Void> REALM_IDENTIFIER_PARSER = new ConstructingObjectParser<>(
"realm_identifier",
false,
(args, v) -> new RealmIdentifier((String) args[0], (String) args[1])
);
static {
REALM_IDENTIFIER_PARSER.declareString(constructorArg(), new ParseField("type"));
REALM_IDENTIFIER_PARSER.declareString(constructorArg(), new ParseField("name"));
}
}
| RealmIdentifier |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.