language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__dubbo
|
dubbo-plugin/dubbo-rest-openapi/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/openapi/DefaultOpenAPINamingStrategy.java
|
{
"start": 1191,
"end": 2470
}
|
class ____ implements OpenAPINamingStrategy {
@Override
public String generateOperationId(MethodMeta methodMeta, OpenAPI openAPI) {
return methodMeta.getMethod().getName();
}
@Override
public String resolveOperationIdConflict(int attempt, String operationId, MethodMeta methodMeta, OpenAPI openAPI) {
Method method = methodMeta.getMethod();
if (attempt == 1) {
String sig = TypeUtils.buildSig(method);
if (sig != null) {
return method.getName() + '_' + sig;
}
}
return method.getName() + '_' + buildPostfix(attempt, method.toString());
}
@Override
public String generateSchemaName(Class<?> clazz, OpenAPI openAPI) {
return clazz.getSimpleName();
}
@Override
public String resolveSchemaNameConflict(int attempt, String schemaName, Class<?> clazz, OpenAPI openAPI) {
return clazz.getSimpleName() + '_' + buildPostfix(attempt, clazz.getName());
}
private static String buildPostfix(int attempt, String str) {
if (attempt > 4) {
str += ThreadLocalRandom.current().nextInt(10000);
}
return Bytes.bytes2hex(Bytes.getMD5(str), 0, Math.min(4, attempt));
}
}
|
DefaultOpenAPINamingStrategy
|
java
|
apache__camel
|
components/camel-avro-rpc/camel-avro-rpc-component/src/main/java/org/apache/camel/component/avro/AvroNettyEndpoint.java
|
{
"start": 922,
"end": 1781
}
|
class ____ extends AvroEndpoint {
/**
* Constructs a fully-initialized DefaultEndpoint instance. This is the preferred method of constructing an object
* from Java code (as opposed to Spring beans, etc.).
*
* @param endpointUri the full URI used to create this endpoint
* @param component the component that created this endpoint
*/
public AvroNettyEndpoint(String endpointUri, Component component, AvroConfiguration configuration) {
super(endpointUri, component, configuration);
}
/**
* Creates a new producer which is used send messages into the endpoint
*
* @return a newly created producer
* @throws Exception can be thrown
*/
@Override
public Producer createProducer() throws Exception {
return new AvroNettyProducer(this);
}
}
|
AvroNettyEndpoint
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableRefCountTest.java
|
{
"start": 39409,
"end": 43663
}
|
class ____ extends ConnectableFlowable<Object>
implements Disposable {
@Override
public void connect(Consumer<? super Disposable> connection) {
try {
connection.accept(Disposable.empty());
} catch (Throwable ex) {
throw ExceptionHelper.wrapOrThrow(ex);
}
}
@Override
public void reset() {
// nothing to do in this test
}
@Override
protected void subscribeActual(Subscriber<? super Object> subscriber) {
subscriber.onSubscribe(new BooleanSubscription());
subscriber.onSubscribe(new BooleanSubscription());
subscriber.onComplete();
subscriber.onComplete();
subscriber.onError(new TestException());
}
@Override
public void dispose() {
}
@Override
public boolean isDisposed() {
return false;
}
}
@Test
public void doubleOnX() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new BadFlowableDoubleOnX()
.refCount()
.test()
.assertResult();
TestHelper.assertError(errors, 0, ProtocolViolationException.class);
TestHelper.assertUndeliverable(errors, 1, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void doubleOnXCount() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new BadFlowableDoubleOnX()
.refCount(1)
.test()
.assertResult();
TestHelper.assertError(errors, 0, ProtocolViolationException.class);
TestHelper.assertUndeliverable(errors, 1, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void doubleOnXTime() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new BadFlowableDoubleOnX()
.refCount(5, TimeUnit.SECONDS, Schedulers.single())
.test()
.assertResult();
TestHelper.assertError(errors, 0, ProtocolViolationException.class);
TestHelper.assertUndeliverable(errors, 1, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void cancelTerminateStateExclusion() {
FlowableRefCount<Object> o = (FlowableRefCount<Object>)PublishProcessor.create()
.publish()
.refCount();
o.cancel(null);
RefConnection rc = new RefConnection(o);
o.connection = null;
rc.subscriberCount = 0;
o.timeout(rc);
rc.subscriberCount = 1;
o.timeout(rc);
o.connection = rc;
o.timeout(rc);
rc.subscriberCount = 0;
o.timeout(rc);
// -------------------
rc.subscriberCount = 2;
rc.connected = false;
o.connection = rc;
o.cancel(rc);
rc.subscriberCount = 1;
rc.connected = false;
o.connection = rc;
o.cancel(rc);
rc.subscriberCount = 2;
rc.connected = true;
o.connection = rc;
o.cancel(rc);
rc.subscriberCount = 1;
rc.connected = true;
o.connection = rc;
rc.set(null);
o.cancel(rc);
o.connection = rc;
o.cancel(new RefConnection(o));
}
@Test
public void replayRefCountShallBeThreadSafe() {
for (int i = 0; i < TestHelper.RACE_LONG_LOOPS; i++) {
Flowable<Integer> flowable = Flowable.just(1).replay(1).refCount();
TestSubscriber<Integer> ts1 = flowable
.subscribeOn(Schedulers.io())
.test();
TestSubscriber<Integer> ts2 = flowable
.subscribeOn(Schedulers.io())
.test();
ts1
.withTag("" + i)
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1);
ts2
.withTag("" + i)
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1);
}
}
static final
|
BadFlowableDoubleOnX
|
java
|
junit-team__junit5
|
junit-vintage-engine/src/main/java/org/junit/vintage/engine/descriptor/RunnerTestDescriptor.java
|
{
"start": 1726,
"end": 6659
}
|
class ____ extends VintageTestDescriptor {
private static final Logger logger = LoggerFactory.getLogger(RunnerTestDescriptor.class);
private final Set<Description> rejectedExclusions = new HashSet<>();
private Runner runner;
private final boolean ignored;
private boolean wasFiltered;
private @Nullable List<Filter> filters = new ArrayList<>();
public RunnerTestDescriptor(UniqueId uniqueId, Class<?> testClass, Runner runner, boolean ignored) {
super(uniqueId, runner.getDescription(), testClass.getSimpleName(), ClassSource.from(testClass));
this.runner = runner;
this.ignored = ignored;
}
@Override
public String getLegacyReportingName() {
return getSource().map(source -> ((ClassSource) source).getClassName()) //
.orElseThrow(() -> new JUnitException("source should have been present"));
}
public Request toRequest() {
return new RunnerRequest(this.runner);
}
public Runner getRunner() {
return runner;
}
@Override
protected boolean tryToExcludeFromRunner(Description description) {
boolean excluded = tryToFilterRunner(description);
if (excluded) {
wasFiltered = true;
}
else {
rejectedExclusions.add(description);
}
return excluded;
}
private boolean tryToFilterRunner(Description description) {
if (runner instanceof Filterable filterable) {
ExcludeDescriptionFilter filter = new ExcludeDescriptionFilter(description);
try {
filterable.filter(filter);
}
catch (NoTestsRemainException ignore) {
// it's safe to ignore this exception because childless TestDescriptors will get pruned
}
return filter.wasSuccessful();
}
return false;
}
@Override
protected boolean canBeRemovedFromHierarchy() {
return true;
}
@Override
public void prune() {
if (wasFiltered) {
// filtering the runner may render intermediate Descriptions obsolete
// (e.g. test classes without any remaining children in a suite)
pruneDescriptorsForObsoleteDescriptions(List.of(runner.getDescription()));
}
if (rejectedExclusions.isEmpty()) {
super.prune();
}
else if (rejectedExclusions.containsAll(getDescription().getChildren())) {
// since the Runner was asked to remove all of its direct children,
// it's safe to remove it entirely
removeFromHierarchy();
}
else {
logIncompleteFiltering();
}
}
private void logIncompleteFiltering() {
if (runner instanceof Filterable) {
logger.warn(() -> "Runner " + getRunnerToReport().getClass().getName() //
+ " (used on class " + getLegacyReportingName() + ") was not able to satisfy all filter requests.");
}
else {
warnAboutUnfilterableRunner();
}
}
private void warnAboutUnfilterableRunner() {
logger.warn(() -> "Runner " + getRunnerToReport().getClass().getName() //
+ " (used on class " + getLegacyReportingName() + ") does not support filtering" //
+ " and will therefore be run completely.");
}
public Optional<List<Filter>> getFilters() {
return Optional.ofNullable(filters);
}
public void clearFilters() {
this.filters = null;
}
public void applyFilters(Consumer<RunnerTestDescriptor> childrenCreator) {
if (filters != null && !filters.isEmpty()) {
if (runner instanceof Filterable) {
this.runner = toRequest().filterWith(new OrFilter(filters)).getRunner();
this.description = runner.getDescription();
this.children.clear();
childrenCreator.accept(this);
}
else {
warnAboutUnfilterableRunner();
}
}
clearFilters();
}
private Runner getRunnerToReport() {
return (runner instanceof RunnerDecorator decorator) ? decorator.getDecoratedRunner() : runner;
}
public boolean isIgnored() {
return ignored;
}
public void setExecutorService(ExecutorService executorService) {
Runner runner = getRunnerToReport();
if (runner instanceof ParentRunner<?> parentRunner) {
parentRunner.setScheduler(new RunnerScheduler() {
private final List<Future<?>> futures = new CopyOnWriteArrayList<>();
@Override
public void schedule(Runnable childStatement) {
futures.add(executorService.submit(childStatement));
}
@Override
public void finished() {
ThrowableCollector collector = new OpenTest4JAwareThrowableCollector();
AtomicBoolean wasInterrupted = new AtomicBoolean(false);
for (Future<?> future : futures) {
collector.execute(() -> {
// We're calling `Future.get()` individually to allow for work stealing
// in case `ExecutorService` is a `ForkJoinPool`
try {
future.get();
}
catch (ExecutionException e) {
throw e.getCause();
}
catch (InterruptedException e) {
wasInterrupted.set(true);
}
});
}
collector.assertEmpty();
if (wasInterrupted.get()) {
logger.warn(() -> "Interrupted while waiting for runner to finish");
Thread.currentThread().interrupt();
}
}
});
}
}
private static
|
RunnerTestDescriptor
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/env/MergedPropertiesFilesOverriddenByInlinedPropertiesTestPropertySourceTests.java
|
{
"start": 1327,
"end": 1752
}
|
class ____ extends
MergedPropertiesFilesTestPropertySourceTests {
@Test
@Override
void verifyPropertiesAreAvailableInEnvironment() {
assertThat(env.getProperty("explicit")).isEqualTo("inlined");
}
@Test
@Override
void verifyExtendedPropertiesAreAvailableInEnvironment() {
assertThat(env.getProperty("extended")).isEqualTo("inlined2");
}
}
|
MergedPropertiesFilesOverriddenByInlinedPropertiesTestPropertySourceTests
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/ext/javatime/ser/MonthSerializer.java
|
{
"start": 481,
"end": 2778
}
|
class ____
extends JSR310FormattedSerializerBase<Month>
{
public static final MonthSerializer INSTANCE = new MonthSerializer();
protected MonthSerializer() { this(null); }
public MonthSerializer(DateTimeFormatter formatter) {
super(Month.class, formatter);
}
private MonthSerializer(MonthSerializer base, DateTimeFormatter dtf, Boolean useTimestamp) {
super(base, dtf, useTimestamp, null, null);
}
@Override
protected MonthSerializer withFormat(DateTimeFormatter dtf,
Boolean useTimestamp, JsonFormat.Shape shape) {
return new MonthSerializer(this, dtf, useTimestamp);
}
@Override
public void serialize(Month value, JsonGenerator g, SerializationContext ctxt)
throws JacksonException
{
if (_useTimestampExplicitOnly(ctxt)) {
g.writeStartArray();
_serialize(g, value, ctxt);
g.writeEndArray();
} else {
_serialize(g, value, ctxt);
}
}
@Override
public void serializeWithType(Month value, JsonGenerator g,
SerializationContext ctxt, TypeSerializer typeSer)
throws JacksonException
{
WritableTypeId typeIdDef = typeSer.writeTypePrefix(g, ctxt,
typeSer.typeId(value, serializationShape(ctxt)));
if ((typeIdDef != null)
&& typeIdDef.valueShape == JsonToken.START_ARRAY) {
_serialize(g, value, ctxt);
} else {
_serialize(g, value, ctxt);
}
typeSer.writeTypeSuffix(g, ctxt, typeIdDef);
}
@Override
protected JsonToken serializationShape(SerializationContext ctxt) {
return _useTimestampExplicitOnly(ctxt) ? JsonToken.START_ARRAY : JsonToken.VALUE_STRING;
}
private void _serialize(JsonGenerator g, Month value, SerializationContext ctxt)
throws JacksonException
{
if (_formatter != null) {
g.writeString(_formatter.format(value));
return;
}
if (ctxt.isEnabled(DateTimeFeature.ONE_BASED_MONTHS)) {
g.writeNumber(value.getValue());
} else {
g.writeNumber(value.getValue() - 1);
}
}
}
|
MonthSerializer
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/expressions/converter/converters/PlusConverter.java
|
{
"start": 1857,
"end": 4075
}
|
class ____ extends CustomizedConverter {
@Override
public RexNode convert(CallExpression call, CallExpressionConvertRule.ConvertContext context) {
checkArgumentNumber(call, 2);
List<RexNode> childrenRexNode = toRexNodes(context, call.getChildren());
if (isCharacterString(toLogicalType(childrenRexNode.get(0).getType()))) {
return context.getRelBuilder()
.call(
FlinkSqlOperatorTable.CONCAT,
childrenRexNode.get(0),
context.getRelBuilder().cast(childrenRexNode.get(1), VARCHAR));
} else if (isCharacterString(toLogicalType(childrenRexNode.get(1).getType()))) {
return context.getRelBuilder()
.call(
FlinkSqlOperatorTable.CONCAT,
context.getRelBuilder().cast(childrenRexNode.get(0), VARCHAR),
childrenRexNode.get(1));
} else if (isTimeInterval(toLogicalType(childrenRexNode.get(0).getType()))
&& childrenRexNode.get(0).getType() == childrenRexNode.get(1).getType()) {
return context.getRelBuilder().call(FlinkSqlOperatorTable.PLUS, childrenRexNode);
} else if (isTimeInterval(toLogicalType(childrenRexNode.get(0).getType()))
&& isTemporal(toLogicalType(childrenRexNode.get(1).getType()))) {
// Calcite has a bug that can't apply INTERVAL + DATETIME (INTERVAL at left)
// we manually switch them here
return context.getRelBuilder()
.call(
FlinkSqlOperatorTable.DATETIME_PLUS,
childrenRexNode.get(1),
childrenRexNode.get(0));
} else if (isTemporal(toLogicalType(childrenRexNode.get(0).getType()))
&& isTemporal(toLogicalType(childrenRexNode.get(1).getType()))) {
return context.getRelBuilder()
.call(FlinkSqlOperatorTable.DATETIME_PLUS, childrenRexNode);
} else {
return context.getRelBuilder().call(FlinkSqlOperatorTable.PLUS, childrenRexNode);
}
}
}
|
PlusConverter
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/model/ValidateDefinition.java
|
{
"start": 1542,
"end": 4591
}
|
class ____ extends ExpressionNode {
@XmlTransient
private PredicateExceptionFactory factory;
@XmlAttribute
@Metadata(label = "advanced", javaType = "org.apache.camel.spi.PredicateExceptionFactory")
private String predicateExceptionFactory;
public ValidateDefinition() {
}
protected ValidateDefinition(ValidateDefinition source) {
super(source);
this.factory = source.factory;
this.predicateExceptionFactory = source.predicateExceptionFactory;
}
public ValidateDefinition(Expression expression) {
super(expression);
}
public ValidateDefinition(Predicate predicate) {
super(predicate);
}
@Override
public ValidateDefinition copyDefinition() {
return new ValidateDefinition(this);
}
@Override
public String toString() {
return "Validate[" + getExpression() + " -> " + getOutputs() + "]";
}
@Override
public String getShortName() {
return "validate";
}
@Override
public String getLabel() {
return "validate[" + getExpression() + "]";
}
/**
* Expression to use for validation as a predicate. The expression should return either <tt>true</tt> or
* <tt>false</tt>. If returning <tt>false</tt> the message is invalid and an exception is thrown.
*/
@Override
public void setExpression(ExpressionDefinition expression) {
// override to include javadoc what the expression is used for
super.setExpression(expression);
}
public PredicateExceptionFactory getFactory() {
return factory;
}
public String getPredicateExceptionFactory() {
return predicateExceptionFactory;
}
/**
* The bean id of custom PredicateExceptionFactory to use for creating the exception when the validation fails.
*
* By default, Camel will throw PredicateValidationException. By using a custom factory you can control which
* exception to throw instead.
*/
public void setPredicateExceptionFactory(String predicateExceptionFactory) {
this.predicateExceptionFactory = predicateExceptionFactory;
}
/**
* The custom PredicateExceptionFactory to use for creating the exception when the validation fails.
*
* By default, Camel will throw PredicateValidationException. By using a custom factory you can control which
* exception to throw instead.
*/
public ValidateDefinition predicateExceptionFactory(PredicateExceptionFactory factory) {
this.factory = factory;
return this;
}
/**
* The bean id of the custom PredicateExceptionFactory to use for creating the exception when the validation fails.
*
* By default, Camel will throw PredicateValidationException. By using a custom factory you can control which
* exception to throw instead.
*/
public ValidateDefinition predicateExceptionFactory(String ref) {
this.predicateExceptionFactory = ref;
return this;
}
}
|
ValidateDefinition
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatePeriodTests.java
|
{
"start": 1634,
"end": 4058
}
|
class ____ extends AbstractScalarFunctionTestCase {
public ToDatePeriodTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
List<TestCaseSupplier> suppliers = new ArrayList<>();
suppliers.add(new TestCaseSupplier(List.of(DATE_PERIOD), () -> {
Period field = (Period) randomLiteral(DATE_PERIOD).value();
return new TestCaseSupplier.TestCase(
List.of(new TestCaseSupplier.TypedData(field, DATE_PERIOD, "field").forceLiteral()),
matchesPattern("LiteralsEvaluator.*"),
DATE_PERIOD,
equalTo(field)
).withoutEvaluator();
}));
for (EsqlDataTypeConverter.INTERVALS interval : DATE_PERIODS) {
for (DataType inputType : List.of(KEYWORD, TEXT)) {
suppliers.add(new TestCaseSupplier(List.of(inputType), () -> {
BytesRef field = new BytesRef(
" ".repeat(randomIntBetween(0, 10)) + (randomBoolean() ? "" : "-") + randomIntBetween(0, 36500000) + " ".repeat(
randomIntBetween(1, 10)
) + interval.toString() + " ".repeat(randomIntBetween(0, 10))
);
TemporalAmount result = EsqlDataTypeConverter.parseTemporalAmount(field.utf8ToString(), DATE_PERIOD);
return new TestCaseSupplier.TestCase(
List.of(new TestCaseSupplier.TypedData(field, inputType, "field").forceLiteral()),
matchesPattern("LiteralsEvaluator.*"),
DATE_PERIOD,
equalTo(result)
).withoutEvaluator();
}));
}
}
return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new ToDatePeriod(source, args.get(0));
}
@Override
public void testSerializationOfSimple() {
assertTrue("Serialization test does not apply", true);
}
@Override
protected Expression serializeDeserializeExpression(Expression expression) {
// Can't be serialized
return expression;
}
}
|
ToDatePeriodTests
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupHeartbeatRequest.java
|
{
"start": 1144,
"end": 1616
}
|
class ____ extends AbstractRequest {
/**
* A member epoch of <code>-1</code> means that the member wants to leave the group.
*/
public static final int LEAVE_GROUP_MEMBER_EPOCH = -1;
public static final int LEAVE_GROUP_STATIC_MEMBER_EPOCH = -2;
/**
* A member epoch of <code>0</code> means that the member wants to join the group.
*/
public static final int JOIN_GROUP_MEMBER_EPOCH = 0;
public static
|
StreamsGroupHeartbeatRequest
|
java
|
grpc__grpc-java
|
examples/src/main/java/io/grpc/examples/customloadbalance/ShufflingPickFirstLoadBalancer.java
|
{
"start": 1926,
"end": 5179
}
|
class ____ {
final Long randomSeed;
Config(Long randomSeed) {
this.randomSeed = randomSeed;
}
}
public ShufflingPickFirstLoadBalancer(Helper helper) {
this.helper = checkNotNull(helper, "helper");
}
@Override
public Status acceptResolvedAddresses(ResolvedAddresses resolvedAddresses) {
List<EquivalentAddressGroup> servers = new ArrayList<>(resolvedAddresses.getAddresses());
if (servers.isEmpty()) {
Status unavailableStatus = Status.UNAVAILABLE.withDescription(
"NameResolver returned no usable address. addrs=" + resolvedAddresses.getAddresses()
+ ", attrs=" + resolvedAddresses.getAttributes());
handleNameResolutionError(unavailableStatus);
return unavailableStatus;
}
Config config
= (Config) resolvedAddresses.getLoadBalancingPolicyConfig();
Collections.shuffle(servers,
config.randomSeed != null ? new Random(config.randomSeed) : new Random());
if (subchannel == null) {
final Subchannel subchannel = helper.createSubchannel(
CreateSubchannelArgs.newBuilder()
.setAddresses(servers)
.build());
subchannel.start(new SubchannelStateListener() {
@Override
public void onSubchannelState(ConnectivityStateInfo stateInfo) {
processSubchannelState(subchannel, stateInfo);
}
});
this.subchannel = subchannel;
helper.updateBalancingState(CONNECTING, new Picker(PickResult.withNoResult()));
subchannel.requestConnection();
} else {
subchannel.updateAddresses(servers);
}
return Status.OK;
}
@Override
public void handleNameResolutionError(Status error) {
if (subchannel != null) {
subchannel.shutdown();
subchannel = null;
}
helper.updateBalancingState(TRANSIENT_FAILURE, new Picker(PickResult.withError(error)));
}
private void processSubchannelState(Subchannel subchannel, ConnectivityStateInfo stateInfo) {
ConnectivityState currentState = stateInfo.getState();
if (currentState == SHUTDOWN) {
return;
}
if (stateInfo.getState() == TRANSIENT_FAILURE || stateInfo.getState() == IDLE) {
helper.refreshNameResolution();
}
SubchannelPicker picker;
switch (currentState) {
case IDLE:
picker = new RequestConnectionPicker();
break;
case CONNECTING:
picker = new Picker(PickResult.withNoResult());
break;
case READY:
picker = new Picker(PickResult.withSubchannel(subchannel));
break;
case TRANSIENT_FAILURE:
picker = new Picker(PickResult.withError(stateInfo.getStatus()));
break;
default:
throw new IllegalArgumentException("Unsupported state:" + currentState);
}
helper.updateBalancingState(currentState, picker);
}
@Override
public void shutdown() {
if (subchannel != null) {
subchannel.shutdown();
}
}
@Override
public void requestConnection() {
if (subchannel != null) {
subchannel.requestConnection();
}
}
/**
* No-op picker which doesn't add any custom picking logic. It just passes already known result
* received in constructor.
*/
private static final
|
Config
|
java
|
quarkusio__quarkus
|
extensions/micrometer/runtime/src/main/java/io/quarkus/micrometer/runtime/binder/HttpMeterFilterProvider.java
|
{
"start": 406,
"end": 2550
}
|
class ____ {
HttpBinderConfiguration binderConfiguration;
HttpMeterFilterProvider(HttpBinderConfiguration binderConfiguration) {
this.binderConfiguration = binderConfiguration;
}
@Singleton
@Produces
public MeterFilter metricsHttpClientUriTagFilter(HttpClientConfig httpClientConfig) {
if (binderConfiguration.isClientEnabled()) {
return maximumAllowableUriTagsFilter(binderConfiguration.getHttpClientRequestsName(),
httpClientConfig.maxUriTags());
}
return null;
}
@Singleton
@Produces
public MeterFilter metricsHttpServerUriTagFilter(HttpServerConfig httpServerConfig) {
if (binderConfiguration.isServerEnabled()) {
return maximumAllowableUriTagsFilter(binderConfiguration.getHttpServerRequestsName(),
httpServerConfig.maxUriTags());
}
return null;
}
@Singleton
@Produces
public MeterFilter metricsHttpPushUriTagFilter(HttpServerConfig httpServerConfig) {
if (binderConfiguration.isServerEnabled()) {
return maximumAllowableUriTagsFilter(binderConfiguration.getHttpServerPushName(),
httpServerConfig.maxUriTags());
}
return null;
}
@Singleton
@Produces
public MeterFilter metricsHttpWebSocketsUriTagFilter(HttpServerConfig httpServerConfig) {
if (binderConfiguration.isServerEnabled()) {
return maximumAllowableUriTagsFilter(binderConfiguration.getHttpServerWebSocketConnectionsName(),
httpServerConfig.maxUriTags());
}
return null;
}
MeterFilter maximumAllowableUriTagsFilter(final String metricName, final int maximumTagValues) {
MeterFilter denyFilter = new OnlyOnceLoggingDenyMeterFilter(() -> String
.format("Reached the maximum number (%s) of URI tags for '%s'. Are you using path parameters?",
maximumTagValues, metricName));
return MeterFilter.maximumAllowableTags(metricName, "uri", maximumTagValues,
denyFilter);
}
}
|
HttpMeterFilterProvider
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/runtime/src/main/java/io/quarkus/rest/client/reactive/ClientRedirectHandler.java
|
{
"start": 518,
"end": 1281
}
|
interface ____ meets the following criteria:
* <ul>
* <li>Is a {@code static} method</li>
* <li>Returns any subclass of {@link java.net.URI}</li>
* <li>Takes a single parameter of type {@link jakarta.ws.rs.core.Response}</li>
* </ul>
*
* An example method could look like the following:
*
* <pre>
* {@code
* @ClientRedirectHandler
* static DummyException map(Response response) {
* if (response.getStatus() == 307) {
* return response.getLocation();
* }
* // no redirect
* return null;
* }
*
* }
* </pre>
*
* If {@code null} is returned, Quarkus will not redirect
* {@link org.jboss.resteasy.reactive.client.handlers.RedirectHandler}.
*/
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @
|
that
|
java
|
square__okhttp
|
samples/slack/src/main/java/okhttp3/slack/SlackClient.java
|
{
"start": 798,
"end": 3459
}
|
class ____ {
private final SlackApi slackApi;
private OAuthSessionFactory sessionFactory;
/** Guarded by this. */
private OAuthSession session;
public SlackClient(SlackApi slackApi) {
this.slackApi = slackApi;
}
/** Shows a browser URL to authorize this app to act as this user. */
public void requestOauthSession(String scopes, String team) throws Exception {
if (sessionFactory == null) {
sessionFactory = new OAuthSessionFactory(slackApi);
sessionFactory.start();
}
HttpUrl authorizeUrl = sessionFactory.newAuthorizeUrl(scopes, team, session -> {
initOauthSession(session);
System.out.printf("session granted: %s\n", session);
});
System.out.printf("open this URL in a browser: %s\n", authorizeUrl);
}
/** Set the OAuth session for this client. */
public synchronized void initOauthSession(OAuthSession session) {
this.session = session;
this.notifyAll();
}
/** Waits for an OAuth session for this client to be set. */
public synchronized void awaitAccessToken(Timeout timeout) throws InterruptedIOException {
while (session == null) {
timeout.waitUntilNotified(this);
}
}
/** Starts a real time messaging session. */
public void startRtm() throws IOException {
String accessToken;
synchronized (this) {
accessToken = session.access_token;
}
RtmSession rtmSession = new RtmSession(slackApi);
rtmSession.open(accessToken);
}
public static void main(String... args) throws Exception {
String clientId = "0000000000.00000000000";
String clientSecret = "00000000000000000000000000000000";
int port = 53203;
SlackApi slackApi = new SlackApi(clientId, clientSecret, port);
SlackClient client = new SlackClient(slackApi);
String scopes = "channels:history channels:read channels:write chat:write:bot chat:write:user "
+ "dnd:read dnd:write emoji:read files:read files:write:user groups:history groups:read "
+ "groups:write im:history im:read im:write mpim:history mpim:read mpim:write pins:read "
+ "pins:write reactions:read reactions:write search:read stars:read stars:write team:read "
+ "usergroups:read usergroups:write users:read users:write identify";
if (true) {
client.requestOauthSession(scopes, null);
} else {
OAuthSession session = new OAuthSession(true,
"xoxp-XXXXXXXXXX-XXXXXXXXXX-XXXXXXXXXXX-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
scopes, "UXXXXXXXX", "My Slack Group", "TXXXXXXXX");
client.initOauthSession(session);
}
client.awaitAccessToken(Timeout.NONE);
client.startRtm();
}
}
|
SlackClient
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java
|
{
"start": 1060,
"end": 5178
}
|
class ____ {
public static final Predicate<FieldExpression.FieldValue> NULL_PREDICATE = field -> field.getValue() == null;
private static final Logger logger = LogManager.getLogger(ExpressionModel.class);
private final Map<String, Object> fieldValues;
private final Map<String, Predicate<FieldExpression.FieldValue>> fieldPredicates;
public ExpressionModel() {
this.fieldValues = new HashMap<>();
this.fieldPredicates = new HashMap<>();
}
/**
* Defines a field using a predicate that corresponds to the type of {@code value}
*
* @see #buildPredicate(Object)
*/
public ExpressionModel defineField(String name, Object value) {
return defineField(name, value, buildPredicate(value));
}
/**
* Defines a field using a supplied predicate.
*/
public ExpressionModel defineField(String name, Object value, Predicate<FieldExpression.FieldValue> predicate) {
this.fieldValues.put(name, value);
this.fieldPredicates.put(name, predicate);
return this;
}
/**
* Returns {@code true} if the named field, matches <em>any</em> of the provided values.
*/
public boolean test(String field, List<FieldExpression.FieldValue> values) {
final Predicate<FieldExpression.FieldValue> predicate = this.fieldPredicates.getOrDefault(field, NULL_PREDICATE);
boolean isMatch = values.stream().anyMatch(predicate);
if (isMatch == false && predicate == NULL_PREDICATE && fieldPredicates.containsKey(field) == false) {
logger.debug(
() -> format(
"Attempt to test field [%s] against value(s) [%s],"
+ " but the field [%s] does not have a value on this object;"
+ " known fields are [%s]",
field,
collectionToCommaDelimitedString(values),
field,
collectionToCommaDelimitedString(fieldPredicates.keySet())
)
);
}
return isMatch;
}
/**
* Constructs a {@link Predicate} that matches correctly based on the type of the provided parameter.
*/
static Predicate<FieldExpression.FieldValue> buildPredicate(Object object) {
if (object == null) {
return NULL_PREDICATE;
}
if (object instanceof Boolean) {
return field -> object.equals(field.getValue());
}
if (object instanceof Number) {
return field -> numberEquals((Number) object, field.getValue());
}
if (object instanceof String) {
return field -> field.getAutomaton() == null ? object.equals(field.getValue()) : field.getAutomaton().run((String) object);
}
if (object instanceof Collection) {
return ((Collection<?>) object).stream()
.map(element -> buildPredicate(element))
.reduce((a, b) -> a.or(b))
.orElse(Predicates.never());
}
throw new IllegalArgumentException("Unsupported value type " + object.getClass());
}
/**
* A comparison of {@link Number} objects that compares by floating point when either value is a {@link Float} or {@link Double}
* otherwise compares by {@link Numbers#toLongExact long}.
*/
private static boolean numberEquals(Number left, Object other) {
if (left.equals(other)) {
return true;
}
if ((other instanceof Number) == false) {
return false;
}
Number right = (Number) other;
if (left instanceof Double || left instanceof Float || right instanceof Double || right instanceof Float) {
return Double.compare(left.doubleValue(), right.doubleValue()) == 0;
}
return Numbers.toLongExact(left) == Numbers.toLongExact(right);
}
public Map<String, Object> asMap() {
return Collections.unmodifiableMap(fieldValues);
}
@Override
public String toString() {
return fieldValues.toString();
}
}
|
ExpressionModel
|
java
|
elastic__elasticsearch
|
x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/SharedCacheEvictionTests.java
|
{
"start": 9434,
"end": 11172
}
|
class ____ extends LocalStateCompositeXPackPlugin implements SystemIndexPlugin {
private final SearchableSnapshots plugin;
public SpyableSharedCacheSearchableSnapshots(final Settings settings, final Path configPath) {
super(settings, configPath);
this.plugin = new SearchableSnapshots(settings) {
@Override
protected XPackLicenseState getLicenseState() {
return SpyableSharedCacheSearchableSnapshots.this.getLicenseState();
}
@Override
protected SharedBlobCacheService<CacheKey> createSharedBlobCacheService(
Settings settings,
ThreadPool threadPool,
NodeEnvironment nodeEnvironment,
BlobCacheMetrics blobCacheMetrics
) {
final SharedBlobCacheService<CacheKey> spy = Mockito.spy(
super.createSharedBlobCacheService(settings, threadPool, nodeEnvironment, blobCacheMetrics)
);
sharedBlobCacheServices.put(nodeEnvironment.nodeId(), spy);
return spy;
}
};
plugins.add(plugin);
}
@Override
public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) {
return plugin.getSystemIndexDescriptors(settings);
}
@Override
public String getFeatureName() {
return plugin.getFeatureName();
}
@Override
public String getFeatureDescription() {
return plugin.getFeatureDescription();
}
}
}
|
SpyableSharedCacheSearchableSnapshots
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/web/client/SecurityContextHolderPrincipalResolver.java
|
{
"start": 1140,
"end": 2104
}
|
class ____ implements OAuth2ClientHttpRequestInterceptor.PrincipalResolver {
private final SecurityContextHolderStrategy securityContextHolderStrategy;
/**
* Constructs a {@code SecurityContextHolderPrincipalResolver}.
*/
public SecurityContextHolderPrincipalResolver() {
this(SecurityContextHolder.getContextHolderStrategy());
}
/**
* Constructs a {@code SecurityContextHolderPrincipalResolver} using the provided
* parameters.
* @param securityContextHolderStrategy the {@link SecurityContextHolderStrategy} to
* use for resolving the {@link Authentication principal}
*/
public SecurityContextHolderPrincipalResolver(SecurityContextHolderStrategy securityContextHolderStrategy) {
this.securityContextHolderStrategy = securityContextHolderStrategy;
}
@Override
public Authentication resolve(HttpRequest request) {
return this.securityContextHolderStrategy.getContext().getAuthentication();
}
}
|
SecurityContextHolderPrincipalResolver
|
java
|
elastic__elasticsearch
|
x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndicesStatsMonitoringDoc.java
|
{
"start": 949,
"end": 4978
}
|
class ____ extends FilteredMonitoringDoc {
public static final String TYPE = "indices_stats";
private final List<IndexStats> indicesStats;
IndicesStatsMonitoringDoc(
final String cluster,
final long timestamp,
final long intervalMillis,
final MonitoringDoc.Node node,
final List<IndexStats> indicesStats
) {
super(cluster, timestamp, intervalMillis, node, MonitoredSystem.ES, TYPE, null, XCONTENT_FILTERS);
this.indicesStats = Objects.requireNonNull(indicesStats);
}
List<IndexStats> getIndicesStats() {
return indicesStats;
}
@Override
protected void innerToXContent(XContentBuilder builder, Params params) throws IOException {
final CommonStats total = new CommonStats();
final CommonStats primaries = new CommonStats();
for (IndexStats indexStats : getIndicesStats()) {
final ShardStats[] shardsStats = indexStats.getShards();
if (shardsStats != null) {
for (ShardStats shard : indexStats.getShards()) {
total.add(shard.getStats());
if (shard.getShardRouting().primary()) {
primaries.add(shard.getStats());
}
}
}
}
builder.startObject(TYPE);
{
builder.startObject("_all");
{
builder.startObject("primaries");
primaries.toXContent(builder, params);
builder.endObject();
builder.startObject("total");
total.toXContent(builder, params);
builder.endObject();
}
builder.endObject();
builder.startObject("indices");
for (IndexStats indexStats : getIndicesStats()) {
builder.startObject(indexStats.getIndex());
builder.startObject("primaries");
indexStats.getPrimaries().toXContent(builder, params);
builder.endObject();
builder.startObject("total");
indexStats.getTotal().toXContent(builder, params);
builder.endObject();
builder.endObject();
}
builder.endObject();
}
builder.endObject();
}
public static final Set<String> XCONTENT_FILTERS = Set.of(
"indices_stats._all.primaries.docs.count",
"indices_stats._all.primaries.indexing.index_time_in_millis",
"indices_stats._all.primaries.indexing.index_total",
"indices_stats._all.primaries.indexing.is_throttled",
"indices_stats._all.primaries.indexing.throttle_time_in_millis",
"indices_stats._all.primaries.search.query_time_in_millis",
"indices_stats._all.primaries.search.query_total",
"indices_stats._all.primaries.store.size_in_bytes",
"indices_stats._all.primaries.bulk.total_operations",
"indices_stats._all.primaries.bulk.total_time_in_millis",
"indices_stats._all.primaries.bulk.total_size_in_bytes",
"indices_stats._all.primaries.bulk.avg_time_in_millis",
"indices_stats._all.primaries.bulk.avg_size_in_bytes",
"indices_stats._all.total.docs.count",
"indices_stats._all.total.indexing.index_time_in_millis",
"indices_stats._all.total.indexing.index_total",
"indices_stats._all.total.indexing.is_throttled",
"indices_stats._all.total.indexing.throttle_time_in_millis",
"indices_stats._all.total.search.query_time_in_millis",
"indices_stats._all.total.search.query_total",
"indices_stats._all.total.store.size_in_bytes",
"indices_stats._all.total.bulk.total_operations",
"indices_stats._all.total.bulk.total_time_in_millis",
"indices_stats._all.total.bulk.total_size_in_bytes",
"indices_stats._all.total.bulk.avg_time_in_millis",
"indices_stats._all.total.bulk.avg_size_in_bytes"
);
}
|
IndicesStatsMonitoringDoc
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/tracing/Tracing.java
|
{
"start": 991,
"end": 1280
}
|
interface ____ declarations of basic required interfaces and
* value objects to represent traces, spans and metadata in an dependency-agnostic manner.
*
* @author Mark Paluch
* @author Daniel Albuquerque
* @since 5.1
* @see TracerProvider
* @see TraceContextProvider
*/
public
|
contains
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/main/java/org/hibernate/envers/configuration/internal/metadata/reader/ComponentAuditedPropertiesReader.java
|
{
"start": 3730,
"end": 3967
}
|
class ____ property are explicitly 'isAudited=false', use that.
if ( classNotAuditedOverride || isOverriddenNotAudited( memberDetails ) || isOverriddenNotAudited( declaringClass ) ) {
return false;
}
// make sure that if the
|
or
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/authentication/DPoPProofVerifier.java
|
{
"start": 1557,
"end": 2764
}
|
class ____ {
private static final JwtDecoderFactory<DPoPProofContext> dPoPProofVerifierFactory = new DPoPProofJwtDecoderFactory();
private DPoPProofVerifier() {
}
static Jwt verifyIfAvailable(OAuth2AuthorizationGrantAuthenticationToken authorizationGrantAuthentication) {
String dPoPProof = (String) authorizationGrantAuthentication.getAdditionalParameters().get("dpop_proof");
if (!StringUtils.hasText(dPoPProof)) {
return null;
}
String method = (String) authorizationGrantAuthentication.getAdditionalParameters().get("dpop_method");
String targetUri = (String) authorizationGrantAuthentication.getAdditionalParameters().get("dpop_target_uri");
Jwt dPoPProofJwt;
try {
// @formatter:off
DPoPProofContext dPoPProofContext = DPoPProofContext.withDPoPProof(dPoPProof)
.method(method)
.targetUri(targetUri)
.build();
// @formatter:on
JwtDecoder dPoPProofVerifier = dPoPProofVerifierFactory.createDecoder(dPoPProofContext);
dPoPProofJwt = dPoPProofVerifier.decode(dPoPProof);
}
catch (Exception ex) {
throw new OAuth2AuthenticationException(new OAuth2Error(OAuth2ErrorCodes.INVALID_DPOP_PROOF), ex);
}
return dPoPProofJwt;
}
}
|
DPoPProofVerifier
|
java
|
quarkusio__quarkus
|
extensions/panache/hibernate-orm-rest-data-panache/deployment/src/test/java/io/quarkus/hibernate/orm/rest/data/panache/deployment/build/BuildConditionsWithResourceEnabledTest.java
|
{
"start": 355,
"end": 885
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest()
.overrideConfigKey("collections.enabled", "true")
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)
.addClasses(Collection.class, CollectionsResource.class));
@Test
void shouldResourceBeFound() {
given().accept("application/json")
.when().get("/collections")
.then().statusCode(200);
}
}
|
BuildConditionsWithResourceEnabledTest
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/conversion/nativetypes/DoubleSource.java
|
{
"start": 210,
"end": 1884
}
|
class ____ {
private double b;
private double bb;
private double s;
private double ss;
private double i;
private double ii;
private double l;
private double ll;
private double f;
private double ff;
private double d;
private double dd;
public double getB() {
return b;
}
public void setB(double b) {
this.b = b;
}
public double getBb() {
return bb;
}
public void setBb(double bb) {
this.bb = bb;
}
public double getS() {
return s;
}
public void setS(double s) {
this.s = s;
}
public double getSs() {
return ss;
}
public void setSs(double ss) {
this.ss = ss;
}
public double getI() {
return i;
}
public void setI(double i) {
this.i = i;
}
public double getIi() {
return ii;
}
public void setIi(double ii) {
this.ii = ii;
}
public double getL() {
return l;
}
public void setL(double l) {
this.l = l;
}
public double getLl() {
return ll;
}
public void setLl(double ll) {
this.ll = ll;
}
public double getF() {
return f;
}
public void setF(double f) {
this.f = f;
}
public double getFf() {
return ff;
}
public void setFf(double ff) {
this.ff = ff;
}
public double getD() {
return d;
}
public void setD(double d) {
this.d = d;
}
public double getDd() {
return dd;
}
public void setDd(double dd) {
this.dd = dd;
}
}
|
DoubleSource
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/MockitoAnnotations.java
|
{
"start": 2431,
"end": 4520
}
|
class ____ {
/**
* Initializes objects annotated with Mockito annotations for given testClass:
* @{@link org.mockito.Mock}, @{@link Spy}, @{@link Captor}, @{@link InjectMocks}
* <p>
* See examples in javadoc for {@link MockitoAnnotations} class.
*
* @return A closable to close when completing any tests in {@code testClass}.
*/
public static AutoCloseable openMocks(Object testClass) {
if (testClass == null) {
throw new MockitoException(
"testClass cannot be null. For info how to use @Mock annotations see examples in javadoc for MockitoAnnotations class");
}
AnnotationEngine annotationEngine =
new GlobalConfiguration().tryGetPluginAnnotationEngine();
return annotationEngine.process(testClass.getClass(), testClass);
}
/**
* Initializes objects annotated with Mockito annotations for given testClass:
* @{@link org.mockito.Mock}, @{@link Spy}, @{@link Captor}, @{@link InjectMocks}
* <p>
* See examples in javadoc for {@link MockitoAnnotations} class.
*
* @deprecated Use {@link MockitoAnnotations#openMocks(Object)} instead.
* This method is equivalent to {@code openMocks(testClass).close()}.
* The close method should however only be called after completed usage of {@code testClass}.
* If using static-mocks or custom {@link org.mockito.plugins.MockMaker}s, using this method might
* cause misbehavior of mocks injected into the test class.
*/
@Deprecated
public static void initMocks(Object testClass) {
try {
openMocks(testClass).close();
} catch (Exception e) {
throw new MockitoException(
join(
"Failed to release mocks",
"",
"This should not happen unless you are using a third-party mock maker"),
e);
}
}
private MockitoAnnotations() {}
}
|
MockitoAnnotations
|
java
|
netty__netty
|
handler/src/main/java/io/netty/handler/timeout/IdleStateHandler.java
|
{
"start": 18418,
"end": 19630
}
|
class ____ extends AbstractIdleTask {
ReaderIdleTimeoutTask(ChannelHandlerContext ctx) {
super(ctx);
}
@Override
protected void run(ChannelHandlerContext ctx) {
long nextDelay = readerIdleTimeNanos;
if (!reading) {
nextDelay -= ticker.nanoTime() - lastReadTime;
}
if (nextDelay <= 0) {
// Reader is idle - set a new timeout and notify the callback.
readerIdleTimeout = schedule(ctx, this, readerIdleTimeNanos, TimeUnit.NANOSECONDS);
boolean first = firstReaderIdleEvent;
firstReaderIdleEvent = false;
try {
IdleStateEvent event = newIdleStateEvent(IdleState.READER_IDLE, first);
channelIdle(ctx, event);
} catch (Throwable t) {
ctx.fireExceptionCaught(t);
}
} else {
// Read occurred before the timeout - set a new timeout with shorter delay.
readerIdleTimeout = schedule(ctx, this, nextDelay, TimeUnit.NANOSECONDS);
}
}
}
private final
|
ReaderIdleTimeoutTask
|
java
|
google__guice
|
core/src/com/google/inject/internal/aop/MethodPartition.java
|
{
"start": 4907,
"end": 6404
}
|
class ____ to the original bridge method (as this would bypass
// interception if the delegate method was itself intercepted by a different interceptor!)
for (Map.Entry<String, Method> targetEntry : bridgeTargets.entrySet()) {
Method originalBridge = leafMethods.get(targetEntry.getKey());
Method superTarget = targetEntry.getValue();
Method enhanceableMethod = originalBridge;
// scan all methods looking for the bridge delegate by comparing generic parameters
// (these are the kind of bridge methods that were added to account for type-erasure)
for (Method candidate : candidates) {
if (!candidate.isBridge()) {
@SuppressWarnings("ReferenceEquality")
boolean sameMethod = candidate == superTarget;
if (sameMethod) {
// if we haven't matched our bridge by generic type and our non-bridge super-method has
// identical parameters and return type to the original (and isn't from an interface)
// then ignore the original bridge method and enhance the super-method instead - this
// helps improve interception behaviour when AOP matchers skip all synthetic methods
if (originalBridge.getReturnType() == superTarget.getReturnType()
&& !superTarget.getDeclaringClass().isInterface()) {
enhanceableMethod = superTarget;
}
break; // we've reached the non-bridge super-method so default to super-
|
invocation
|
java
|
apache__camel
|
core/camel-main/src/main/java/org/apache/camel/main/RouteControllerConfigurationProperties.java
|
{
"start": 1041,
"end": 13505
}
|
class ____ implements BootstrapCloseable {
private MainConfigurationProperties parent;
@Metadata
private boolean enabled;
@Metadata
private String includeRoutes;
@Metadata
private String excludeRoutes;
@Metadata(label = "advanced", defaultValue = "true")
private boolean unhealthyOnExhausted = true;
@Metadata(label = "advanced", defaultValue = "true")
private boolean unhealthyOnRestarting = true;
@Metadata
private long initialDelay;
@Metadata(defaultValue = "2000")
private long backOffDelay;
@Metadata
private long backOffMaxDelay;
@Metadata
private long backOffMaxElapsedTime;
@Metadata
private long backOffMaxAttempts;
@Metadata
private double backOffMultiplier;
@Metadata(label = "advanced", defaultValue = "1")
private int threadPoolSize;
public RouteControllerConfigurationProperties(MainConfigurationProperties parent) {
this.parent = parent;
}
public MainConfigurationProperties end() {
return parent;
}
@Override
public void close() {
parent = null;
}
public boolean isEnabled() {
return enabled;
}
/**
* To enable using supervising route controller which allows Camel to start up and then, the controller takes care
* of starting the routes in a safe manner.
*
* This can be used when you want to start up Camel despite a route may otherwise fail fast during startup and cause
* Camel to fail to start up as well. By delegating the route startup to the supervising route controller, then it
* manages the startup using a background thread. The controller allows to be configured with various settings to
* attempt to restart failing routes.
*/
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
public String getIncludeRoutes() {
return includeRoutes;
}
/**
* Pattern for filtering routes to be included as supervised.
*
* The pattern is matching on route id, and endpoint uri for the route. Multiple patterns can be separated by comma.
*
* For example to include all kafka routes, you can say <tt>kafka:*</tt>. And to include routes with specific route
* ids <tt>myRoute,myOtherRoute</tt>. The pattern supports wildcards and uses the matcher from
* org.apache.camel.support.PatternHelper#matchPattern.
*/
public void setIncludeRoutes(String includeRoutes) {
this.includeRoutes = includeRoutes;
}
public String getExcludeRoutes() {
return excludeRoutes;
}
/**
* Pattern for filtering routes to be excluded as supervised.
*
* The pattern is matching on route id, and endpoint uri for the route. Multiple patterns can be separated by comma.
*
* For example to exclude all JMS routes, you can say <tt>jms:*</tt>. And to exclude routes with specific route ids
* <tt>mySpecialRoute,myOtherSpecialRoute</tt>. The pattern supports wildcards and uses the matcher from
* org.apache.camel.support.PatternHelper#matchPattern.
*/
public void setExcludeRoutes(String excludeRoutes) {
this.excludeRoutes = excludeRoutes;
}
public int getThreadPoolSize() {
return threadPoolSize;
}
/**
* The number of threads used by the route controller scheduled thread pool that are used for restarting routes. The
* pool uses 1 thread by default, but you can increase this to allow the controller to concurrently attempt to
* restart multiple routes in case more than one route has problems starting.
*/
public void setThreadPoolSize(int threadPoolSize) {
this.threadPoolSize = threadPoolSize;
}
public long getInitialDelay() {
return initialDelay;
}
/**
* Initial delay in milli seconds before the route controller starts, after CamelContext has been started.
*/
public void setInitialDelay(long initialDelay) {
this.initialDelay = initialDelay;
}
public long getBackOffDelay() {
return backOffDelay;
}
/**
* Backoff delay in millis when restarting a route that failed to startup.
*/
public void setBackOffDelay(long backOffDelay) {
this.backOffDelay = backOffDelay;
}
public long getBackOffMaxDelay() {
return backOffMaxDelay;
}
/**
* Backoff maximum delay in millis when restarting a route that failed to startup.
*/
public void setBackOffMaxDelay(long backOffMaxDelay) {
this.backOffMaxDelay = backOffMaxDelay;
}
public long getBackOffMaxElapsedTime() {
return backOffMaxElapsedTime;
}
/**
* Backoff maximum elapsed time in millis, after which the backoff should be considered exhausted and no more
* attempts should be made.
*/
public void setBackOffMaxElapsedTime(long backOffMaxElapsedTime) {
this.backOffMaxElapsedTime = backOffMaxElapsedTime;
}
public long getBackOffMaxAttempts() {
return backOffMaxAttempts;
}
/**
* Backoff maximum number of attempts to restart a route that failed to startup. When this threshold has been
* exceeded then the controller will give up attempting to restart the route, and the route will remain as stopped.
*/
public void setBackOffMaxAttempts(long backOffMaxAttempts) {
this.backOffMaxAttempts = backOffMaxAttempts;
}
public double getBackOffMultiplier() {
return backOffMultiplier;
}
/**
* Backoff multiplier to use for exponential backoff. This is used to extend the delay between restart attempts.
*/
public void setBackOffMultiplier(double backOffMultiplier) {
this.backOffMultiplier = backOffMultiplier;
}
public boolean isUnhealthyOnExhausted() {
return unhealthyOnExhausted;
}
/**
* Whether to mark the route as unhealthy (down) when all restarting attempts (backoff) have failed and the route is
* not successfully started and the route manager is giving up.
*
* If setting this to false will make health checks ignore this problem and allow to report the Camel application as
* UP.
*/
public void setUnhealthyOnExhausted(boolean unhealthyOnExhausted) {
this.unhealthyOnExhausted = unhealthyOnExhausted;
}
public boolean isUnhealthyOnRestarting() {
return unhealthyOnRestarting;
}
/**
* Whether to mark the route as unhealthy (down) when the route failed to initially start, and is being controlled
* for restarting (backoff).
*
* If setting this to false will make health checks ignore this problem and allow to report the Camel application as
* UP.
*/
public void setUnhealthyOnRestarting(boolean unhealthyOnRestarting) {
this.unhealthyOnRestarting = unhealthyOnRestarting;
}
/**
* To enable using supervising route controller which allows Camel to start up and then, the controller takes care
* of starting the routes in a safe manner.
*
* This can be used when you want to start up Camel despite a route may otherwise fail fast during startup and cause
* Camel to fail to start up as well. By delegating the route startup to the supervising route controller, then it
* manages the startup using a background thread. The controller allows to be configured with various settings to
* attempt to restart failing routes.
*/
public RouteControllerConfigurationProperties withEnabled(boolean enabled) {
this.enabled = enabled;
return this;
}
/**
* Initial delay in milliseconds before the route controller starts, after CamelContext has been started.
*/
public RouteControllerConfigurationProperties withInitialDelay(long initialDelay) {
this.initialDelay = initialDelay;
return this;
}
/**
* Backoff delay in millis when restarting a route that failed to startup.
*/
public RouteControllerConfigurationProperties withBackOffDelay(long backOffDelay) {
this.backOffDelay = backOffDelay;
return this;
}
/**
* Backoff maximum delay in millis when restarting a route that failed to startup.
*/
public RouteControllerConfigurationProperties withBackOffMaxDelay(long backOffMaxDelay) {
this.backOffMaxDelay = backOffMaxDelay;
return this;
}
/**
* Backoff maximum elapsed time in millis, after which the backoff should be considered exhausted and no more
* attempts should be made.
*/
public RouteControllerConfigurationProperties withBackOffMaxElapsedTime(long backOffMaxElapsedTime) {
this.backOffMaxElapsedTime = backOffMaxElapsedTime;
return this;
}
/**
* Backoff maximum number of attempts to restart a route that failed to startup. When this threshold has been
* exceeded then the controller will give up attempting to restart the route, and the route will remain as stopped.
*/
public RouteControllerConfigurationProperties withBackOffMaxAttempts(long backOffMaxAttempts) {
this.backOffMaxAttempts = backOffMaxAttempts;
return this;
}
/**
* Backoff multiplier to use for exponential backoff. This is used to extend the delay between restart attempts.
*/
public RouteControllerConfigurationProperties withBackOffMultiplier(double backOffMultiplier) {
this.backOffMultiplier = backOffMultiplier;
return this;
}
/**
* The number of threads used by the route controller scheduled thread pool that are used for restarting routes. The
* pool uses 1 thread by default, but you can increase this to allow the controller to concurrently attempt to
* restart multiple routes in case more than one route has problems starting.
*/
public RouteControllerConfigurationProperties withThreadPoolSize(int threadPoolSize) {
this.threadPoolSize = threadPoolSize;
return this;
}
/**
* Pattern for filtering routes to be included as supervised.
*
* The pattern is matching on route id, and endpoint uri for the route. Multiple patterns can be separated by comma.
*
* For example to include all kafka routes, you can say <tt>kafka:*</tt>. And to include routes with specific route
* ids <tt>myRoute,myOtherRoute</tt>. The pattern supports wildcards and uses the matcher from
* org.apache.camel.support.PatternHelper#matchPattern.
*/
public RouteControllerConfigurationProperties withIncludeRoutes(String includeRoutes) {
this.includeRoutes = includeRoutes;
return this;
}
/**
* Pattern for filtering routes to be excluded as supervised.
*
* The pattern is matching on route id, and endpoint uri for the route. Multiple patterns can be separated by comma.
*
* For example to exclude all JMS routes, you can say <tt>jms:*</tt>. And to exclude routes with specific route ids
* <tt>mySpecialRoute,myOtherSpecialRoute</tt>. The pattern supports wildcards and uses the matcher from
* org.apache.camel.support.PatternHelper#matchPattern.
*/
public RouteControllerConfigurationProperties withExcludeRoutes(String excludeRoutes) {
this.excludeRoutes = excludeRoutes;
return this;
}
/**
* Whether to mark the route as unhealthy (down) when all restarting attempts (backoff) have failed and the route is
* not successfully started and the route manager is giving up.
*
* If setting this to false will make health checks ignore this problem and allow to report the Camel application as
* UP.
*/
public RouteControllerConfigurationProperties withUnhealthyOnExhausted(boolean unhealthyOnExhausted) {
this.unhealthyOnExhausted = unhealthyOnExhausted;
return this;
}
/**
* Whether to mark the route as unhealthy (down) when the route failed to initially start, and is being controlled
* for restarting (backoff).
*
* If setting this to false will make health checks ignore this problem and allow to report the Camel application as
* UP.
*/
public RouteControllerConfigurationProperties withUnhealthyOnRestarting(boolean unhealthyOnRestarting) {
this.unhealthyOnRestarting = unhealthyOnRestarting;
return this;
}
}
|
RouteControllerConfigurationProperties
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/selection/resulttype/GoldenDeliciousDto.java
|
{
"start": 244,
"end": 366
}
|
class ____ extends AppleDto {
public GoldenDeliciousDto(String type) {
super( type );
}
}
|
GoldenDeliciousDto
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/script/UpdateByQueryMetadataTests.java
|
{
"start": 578,
"end": 2298
}
|
class ____ extends ESTestCase {
UpdateByQueryMetadata meta;
public void testROFields() {
meta = new UpdateByQueryMetadata("myIndex", "myId", 5, "myRouting", "index", 12345000);
IllegalArgumentException err = expectThrows(IllegalArgumentException.class, () -> meta.put("_index", "something"));
assertEquals("_index cannot be updated", err.getMessage());
err = expectThrows(IllegalArgumentException.class, () -> meta.put("_id", "something"));
assertEquals("_id cannot be updated", err.getMessage());
err = expectThrows(IllegalArgumentException.class, () -> meta.put("_version", 600));
assertEquals("_version cannot be updated", err.getMessage());
err = expectThrows(IllegalArgumentException.class, () -> meta.put("_routing", "something"));
assertEquals("_routing cannot be updated", err.getMessage());
err = expectThrows(IllegalArgumentException.class, () -> meta.put("_now", 9000));
assertEquals("_now cannot be updated", err.getMessage());
}
public void testOpSet() {
meta = new UpdateByQueryMetadata("myIndex", "myId", 5, "myRouting", "index", 12345000);
for (String op : List.of("noop", "index", "delete")) {
meta.setOp(op);
meta.put("op", op);
}
IllegalArgumentException err = expectThrows(IllegalArgumentException.class, () -> meta.put("op", "bad"));
assertEquals("[op] must be one of delete, index, noop, not [bad]", err.getMessage());
err = expectThrows(IllegalArgumentException.class, () -> meta.setOp("bad"));
assertEquals("[op] must be one of delete, index, noop, not [bad]", err.getMessage());
}
}
|
UpdateByQueryMetadataTests
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/pkg/jar/AbstractJarBuilder.java
|
{
"start": 1487,
"end": 14652
}
|
class ____<T extends BuildItem> implements JarBuilder<T> {
private static final Logger LOG = Logger.getLogger(AbstractJarBuilder.class);
protected final CurateOutcomeBuildItem curateOutcome;
protected final OutputTargetBuildItem outputTarget;
protected final ApplicationInfoBuildItem applicationInfo;
protected final PackageConfig packageConfig;
protected final MainClassBuildItem mainClass;
protected final ApplicationArchivesBuildItem applicationArchives;
protected final TransformedClassesBuildItem transformedClasses;
protected final List<GeneratedClassBuildItem> generatedClasses;
protected final List<GeneratedResourceBuildItem> generatedResources;
protected final Set<ArtifactKey> removedArtifactKeys;
public AbstractJarBuilder(CurateOutcomeBuildItem curateOutcome,
OutputTargetBuildItem outputTarget,
ApplicationInfoBuildItem applicationInfo,
PackageConfig packageConfig,
MainClassBuildItem mainClass,
ApplicationArchivesBuildItem applicationArchives,
TransformedClassesBuildItem transformedClasses,
List<GeneratedClassBuildItem> generatedClasses,
List<GeneratedResourceBuildItem> generatedResources,
Set<ArtifactKey> removedArtifactKeys) {
this.curateOutcome = curateOutcome;
this.outputTarget = outputTarget;
this.applicationInfo = applicationInfo;
this.packageConfig = packageConfig;
this.mainClass = mainClass;
this.applicationArchives = applicationArchives;
this.transformedClasses = transformedClasses;
this.generatedClasses = generatedClasses;
this.generatedResources = generatedResources;
this.removedArtifactKeys = removedArtifactKeys;
checkConsistency(generatedClasses);
}
private static void checkConsistency(List<GeneratedClassBuildItem> generatedClasses) {
Map<String, Long> generatedClassOccurrences = generatedClasses.stream()
.sorted(Comparator.comparing(GeneratedClassBuildItem::binaryName))
.collect(Collectors.groupingBy(GeneratedClassBuildItem::binaryName, Collectors.counting()));
StringBuilder duplicates = new StringBuilder();
for (Entry<String, Long> generatedClassOccurrence : generatedClassOccurrences.entrySet()) {
if (generatedClassOccurrence.getValue() < 2) {
continue;
}
duplicates.append("- ").append(generatedClassOccurrence.getKey()).append(": ")
.append(generatedClassOccurrence.getValue()).append("\n");
}
if (!duplicates.isEmpty()) {
throw new IllegalStateException(
"Multiple GeneratedClassBuildItem were produced for the same classes:\n\n" + duplicates);
}
}
/**
* Copy files from {@code archive} to {@code fs}, filtering out service providers into the given map.
*
* @param archive the root application archive
* @param archiveCreator the archive creator
* @param services the services map
* @throws IOException if an error occurs
*/
protected static void copyFiles(ApplicationArchive archive, ArchiveCreator archiveCreator,
Map<String, List<byte[]>> services,
Predicate<String> ignoredEntriesPredicate) throws IOException {
try {
Map<String, Path> pathsToCopy = new TreeMap<>();
archive.accept(tree -> {
tree.walk(new PathVisitor() {
@Override
public void visitPath(PathVisit visit) {
final Path file = visit.getRoot().relativize(visit.getPath());
final String relativePath = toUri(file);
if (relativePath.isEmpty() || ignoredEntriesPredicate.test(relativePath)) {
return;
}
if (Files.isDirectory(visit.getPath())) {
pathsToCopy.put(relativePath, visit.getPath());
} else {
if (relativePath.startsWith("META-INF/services/") && relativePath.length() > 18
&& services != null) {
final byte[] content;
try {
content = Files.readAllBytes(visit.getPath());
} catch (IOException e) {
throw new UncheckedIOException(e);
}
services.computeIfAbsent(relativePath, (u) -> new ArrayList<>()).add(content);
} else if (!relativePath.equals("META-INF/INDEX.LIST")) {
//TODO: auto generate INDEX.LIST
//this may have implications for Camel though, as they change the layout
//also this is only really relevant for the thin jar layout
pathsToCopy.put(relativePath, visit.getPath());
}
}
}
});
});
for (Entry<String, Path> pathEntry : pathsToCopy.entrySet()) {
if (Files.isDirectory(pathEntry.getValue())) {
archiveCreator.addDirectory(pathEntry.getKey());
} else {
archiveCreator.addFileIfNotExists(pathEntry.getValue(), pathEntry.getKey());
}
}
} catch (RuntimeException re) {
final Throwable cause = re.getCause();
if (cause instanceof IOException) {
throw (IOException) cause;
}
throw re;
}
}
protected void copyCommonContent(ArchiveCreator archiveCreator,
Map<String, List<byte[]>> concatenatedEntries,
Predicate<String> ignoredEntriesPredicate)
throws IOException {
//TODO: this is probably broken in gradle
// if (Files.exists(augmentOutcome.getConfigDir())) {
// copyFiles(augmentOutcome.getConfigDir(), runnerZipFs, services);
// }
for (Set<TransformedClassesBuildItem.TransformedClass> transformed : transformedClasses
.getTransformedClassesByJar().values()) {
for (TransformedClassesBuildItem.TransformedClass i : transformed) {
if (i.getData() != null) {
archiveCreator.addFile(i.getData(), i.getFileName());
}
}
}
for (GeneratedClassBuildItem i : generatedClasses) {
String fileName = fromClassNameToResourceName(i.internalName());
archiveCreator.addFileIfNotExists(i.getClassData(), fileName, ArchiveCreator.CURRENT_APPLICATION);
}
for (GeneratedResourceBuildItem i : generatedResources) {
if (ignoredEntriesPredicate.test(i.getName())) {
continue;
}
if (i.getName().startsWith("META-INF/services/")) {
concatenatedEntries.computeIfAbsent(i.getName(), (u) -> new ArrayList<>()).add(i.getData());
continue;
}
archiveCreator.addFileIfNotExists(i.getData(), i.getName(), ArchiveCreator.CURRENT_APPLICATION);
}
copyFiles(applicationArchives.getRootArchive(), archiveCreator, concatenatedEntries, ignoredEntriesPredicate);
for (Map.Entry<String, List<byte[]>> entry : concatenatedEntries.entrySet()) {
archiveCreator.addFile(entry.getValue(), entry.getKey());
}
}
/**
* Manifest generation is quite simple : we just have to push some attributes in manifest.
* However, it gets a little more complex if the manifest preexists.
* So we first try to see if a manifest exists, and otherwise create a new one.
*
* <b>BEWARE</b> this method should be invoked after file copy from target/classes and so on.
* Otherwise, this manifest manipulation will be useless.
*/
protected static void generateManifest(ArchiveCreator archiveCreator, final String classPath, PackageConfig config,
ResolvedDependency appArtifact,
String mainClassName,
ApplicationInfoBuildItem applicationInfo)
throws IOException {
final Manifest manifest = new Manifest();
Attributes attributes = manifest.getMainAttributes();
attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0");
// JDK 24+ needs --add-opens=java.base/java.lang=ALL-UNNAMED for org.jboss.JDKSpecific.ThreadAccess.clearThreadLocals()
attributes.put(new Attributes.Name("Add-Opens"), "java.base/java.lang");
for (Map.Entry<String, String> attribute : config.jar().manifest().attributes().entrySet()) {
attributes.putValue(attribute.getKey(), attribute.getValue());
}
if (attributes.containsKey(Attributes.Name.CLASS_PATH)) {
LOG.warn(
"A CLASS_PATH entry was already defined in your MANIFEST.MF or using the property quarkus.package.jar.manifest.attributes.\"Class-Path\". Quarkus has overwritten this existing entry.");
}
attributes.put(Attributes.Name.CLASS_PATH, classPath);
if (attributes.containsKey(Attributes.Name.MAIN_CLASS)) {
String existingMainClass = attributes.getValue(Attributes.Name.MAIN_CLASS);
if (!mainClassName.equals(existingMainClass)) {
LOG.warn(
"A MAIN_CLASS entry was already defined in your MANIFEST.MF or using the property quarkus.package.jar.manifest.attributes.\"Main-Class\". Quarkus has overwritten your existing entry.");
}
}
attributes.put(Attributes.Name.MAIN_CLASS, mainClassName);
if (config.jar().manifest().addImplementationEntries()
&& !attributes.containsKey(Attributes.Name.IMPLEMENTATION_TITLE)) {
String name = ApplicationInfoBuildItem.UNSET_VALUE.equals(applicationInfo.getName())
? appArtifact.getArtifactId()
: applicationInfo.getName();
attributes.put(Attributes.Name.IMPLEMENTATION_TITLE, name);
}
if (config.jar().manifest().addImplementationEntries()
&& !attributes.containsKey(Attributes.Name.IMPLEMENTATION_VERSION)) {
String version = ApplicationInfoBuildItem.UNSET_VALUE.equals(applicationInfo.getVersion())
? appArtifact.getVersion()
: applicationInfo.getVersion();
attributes.put(Attributes.Name.IMPLEMENTATION_VERSION, version);
}
for (String sectionName : config.jar().manifest().sections().keySet()) {
for (Map.Entry<String, String> entry : config.jar().manifest().sections().get(sectionName).entrySet()) {
Attributes attribs = manifest.getEntries().computeIfAbsent(sectionName, k -> new Attributes());
attribs.putValue(entry.getKey(), entry.getValue());
}
}
archiveCreator.addManifest(manifest);
}
/**
* Indicates whether the given dependency should be included or not.
* <p>
* A dependency should be included if it is a jar file and:
* <p>
* <ul>
* <li>The dependency is not optional or</li>
* <li>The dependency is part of the optional dependencies to include or</li>
* <li>The optional dependencies to include are absent</li>
* </ul>
*
* @param appDep the dependency to test.
* @param optionalDependencies the optional dependencies to include into the final package.
* @return {@code true} if the dependency should be included, {@code false} otherwise.
*/
protected static boolean includeAppDependency(ResolvedDependency appDep, Optional<Set<ArtifactKey>> optionalDependencies,
Set<ArtifactKey> removedArtifacts) {
if (!appDep.isJar()) {
return false;
}
if (appDep.isOptional()) {
return optionalDependencies.map(appArtifactKeys -> appArtifactKeys.contains(appDep.getKey()))
.orElse(true);
}
if (removedArtifacts.contains(appDep.getKey())) {
return false;
}
return true;
}
protected static String suffixToClassifier(String suffix) {
return suffix.startsWith("-") ? suffix.substring(1) : suffix;
}
protected static String toUri(Path path) {
if (path.isAbsolute()) {
return path.toUri().getPath();
}
if (path.getNameCount() == 0) {
return "";
}
return toUri(new StringBuilder(), path, 0).toString();
}
private static StringBuilder toUri(StringBuilder b, Path path, int seg) {
b.append(path.getName(seg));
if (seg < path.getNameCount() - 1) {
b.append('/');
toUri(b, path, seg + 1);
}
return b;
}
}
|
AbstractJarBuilder
|
java
|
hibernate__hibernate-orm
|
hibernate-vector/src/main/java/org/hibernate/vector/internal/PGBinaryVectorJdbcType.java
|
{
"start": 1167,
"end": 3809
}
|
class ____ extends ArrayJdbcType {
public PGBinaryVectorJdbcType(JdbcType elementJdbcType) {
super( elementJdbcType );
}
@Override
public int getDefaultSqlTypeCode() {
return SqlTypes.VECTOR_BINARY;
}
@Override
public <T> JdbcLiteralFormatter<T> getJdbcLiteralFormatter(JavaType<T> javaTypeDescriptor) {
return new PGVectorJdbcLiteralFormatterBinaryVector<>( javaTypeDescriptor );
}
@Override
public <T> JavaType<T> getJdbcRecommendedJavaTypeMapping(
Integer precision,
Integer scale,
TypeConfiguration typeConfiguration) {
return typeConfiguration.getJavaTypeRegistry().getDescriptor( byte[].class );
}
// @Override
// public void appendWriteExpression(String writeExpression, SqlAppender appender, Dialect dialect) {
// appender.append( "cast(" );
// appender.append( writeExpression );
// appender.append( " as varbit)" );
// }
//
// @Override
// public boolean isWriteExpressionTyped(Dialect dialect) {
// return true;
// }
@Override
public @Nullable String castFromPattern(JdbcMapping sourceMapping, @Nullable Size size) {
return sourceMapping.getJdbcType().isStringLike() ? "cast(?1 as varbit)" : null;
}
@Override
public <X> ValueBinder<X> getBinder(final JavaType<X> javaTypeDescriptor) {
return new BasicBinder<>( javaTypeDescriptor, this ) {
@Override
protected void doBind(PreparedStatement st, X value, int index, WrapperOptions options)
throws SQLException {
st.setObject( index, toBitString( getJavaType().unwrap( value, byte[].class, options ) ), Types.OTHER );
}
@Override
protected void doBind(CallableStatement st, X value, String name, WrapperOptions options)
throws SQLException {
st.setObject( name, toBitString( getJavaType().unwrap( value, byte[].class, options ) ), Types.OTHER );
}
};
}
@Override
public <X> ValueExtractor<X> getExtractor(JavaType<X> javaTypeDescriptor) {
return new BasicExtractor<>( javaTypeDescriptor, this ) {
@Override
protected X doExtract(ResultSet rs, int paramIndex, WrapperOptions options) throws SQLException {
return javaTypeDescriptor.wrap( parseBitString( rs.getString( paramIndex ) ), options );
}
@Override
protected X doExtract(CallableStatement statement, int index, WrapperOptions options) throws SQLException {
return javaTypeDescriptor.wrap( parseBitString( statement.getString( index ) ), options );
}
@Override
protected X doExtract(CallableStatement statement, String name, WrapperOptions options) throws SQLException {
return javaTypeDescriptor.wrap( parseBitString( statement.getString( name ) ), options );
}
};
}
}
|
PGBinaryVectorJdbcType
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java
|
{
"start": 2108,
"end": 2180
}
|
class ____ extends BucketsAggregator {
public static
|
IpPrefixAggregator
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/ContextLocalPropagationTest.java
|
{
"start": 3753,
"end": 4999
}
|
class ____ {
private RestClientContextUtil() {
}
public static void putLocal(Object key, Object value) {
determineRestClientContext().putLocal(key, value);
}
private static Context determineRestClientContext() {
// In an ideal world, this would always be populated, however because we never
// defined a proper execution model for the REST Client handlers, currently we are
// in a situation where request filters could be run on the calling context
// and not the client's purpose built context.
// We will need a proper solution soon, but as we need to have a proper way to
// set contextual information in Quarkus 3.20 (LTS), we can't risk breaking
// client code everywhere, so for now we will tell people to check the context
var maybeParentContext = ContextLocals.getParentContext();
Context effectiveContext;
if (maybeParentContext != null) {
effectiveContext = maybeParentContext;
} else {
effectiveContext = Vertx.currentContext();
}
return effectiveContext;
}
}
}
|
RestClientContextUtil
|
java
|
google__guava
|
android/guava/src/com/google/common/collect/RegularImmutableAsList.java
|
{
"start": 1120,
"end": 2865
}
|
class ____<E> extends ImmutableAsList<E> {
private final ImmutableCollection<E> delegate;
private final ImmutableList<? extends E> delegateList;
RegularImmutableAsList(ImmutableCollection<E> delegate, ImmutableList<? extends E> delegateList) {
this.delegate = delegate;
this.delegateList = delegateList;
}
RegularImmutableAsList(ImmutableCollection<E> delegate, Object[] array) {
this(delegate, asImmutableList(array));
}
RegularImmutableAsList(ImmutableCollection<E> delegate, Object[] array, int size) {
this(delegate, asImmutableList(array, size));
}
@Override
ImmutableCollection<E> delegateCollection() {
return delegate;
}
ImmutableList<? extends E> delegateList() {
return delegateList;
}
@SuppressWarnings("unchecked") // safe covariant cast!
@Override
public UnmodifiableListIterator<E> listIterator(int index) {
return (UnmodifiableListIterator<E>) delegateList.listIterator(index);
}
@GwtIncompatible // not present in emulated superclass
@Override
int copyIntoArray(@Nullable Object[] dst, int offset) {
return delegateList.copyIntoArray(dst, offset);
}
@Override
@Nullable Object @Nullable [] internalArray() {
return delegateList.internalArray();
}
@Override
int internalArrayStart() {
return delegateList.internalArrayStart();
}
@Override
int internalArrayEnd() {
return delegateList.internalArrayEnd();
}
@Override
public E get(int index) {
return delegateList.get(index);
}
// redeclare to help optimizers with b/310253115
@SuppressWarnings("RedundantOverride")
@Override
@J2ktIncompatible
@GwtIncompatible
Object writeReplace() {
return super.writeReplace();
}
}
|
RegularImmutableAsList
|
java
|
micronaut-projects__micronaut-core
|
inject-java/src/test/java/org/atinject/jakartatck/auto/events/SomeEvent.java
|
{
"start": 53,
"end": 91
}
|
class ____ implements Event {
}
|
SomeEvent
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/issues/RetryWhileStackOverflowIssueTest.java
|
{
"start": 2077,
"end": 2705
}
|
class ____ {
private int counter;
public String areWeCool() {
int size = currentStackSize();
if (counter++ < 1000) {
return "no";
} else {
return "yes";
}
}
public int getCounter() {
return counter;
}
}
private static int currentStackSize() {
int depth = Thread.currentThread().getStackTrace().length;
if (PRINT_STACK_TRACE) {
new Throwable("Printing Stacktrace depth: " + depth).printStackTrace(System.err);
}
return depth;
}
}
|
MyCoolDude
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/testsupport/TestHelper.java
|
{
"start": 105986,
"end": 109162
}
|
class ____<T> implements FlowableSubscriber<T>, QueueSubscription<T> {
final Subscriber<? super T> downstream;
Subscription upstream;
QueueSubscription<T> qs;
StripBoundarySubscriber(Subscriber<? super T> downstream) {
this.downstream = downstream;
}
@SuppressWarnings("unchecked")
@Override
public void onSubscribe(Subscription subscription) {
this.upstream = subscription;
if (subscription instanceof QueueSubscription) {
qs = (QueueSubscription<T>)subscription;
}
downstream.onSubscribe(this);
}
@Override
public void onNext(T t) {
downstream.onNext(t);
}
@Override
public void onError(Throwable throwable) {
downstream.onError(throwable);
}
@Override
public void onComplete() {
downstream.onComplete();
}
@Override
public int requestFusion(int mode) {
QueueSubscription<T> fs = qs;
if (fs != null) {
return fs.requestFusion(mode & ~BOUNDARY);
}
return NONE;
}
@Override
public boolean offer(T value) {
throw new UnsupportedOperationException("Should not be called");
}
@Override
public boolean offer(T v1, T v2) {
throw new UnsupportedOperationException("Should not be called");
}
@Override
public T poll() throws Throwable {
return qs.poll();
}
@Override
public void clear() {
qs.clear();
}
@Override
public boolean isEmpty() {
return qs.isEmpty();
}
@Override
public void request(long n) {
upstream.request(n);
}
@Override
public void cancel() {
upstream.cancel();
}
}
}
/**
* Strips the {@link QueueFuseable#BOUNDARY} mode flag when the downstream calls {@link QueueSubscription#requestFusion(int)}.
* <p>
* By default, many operators use {@link QueueFuseable#BOUNDARY} to indicate upstream side-effects
* should not leak over a fused boundary. However, some tests want to verify if {@link QueueSubscription#poll()} crashes
* are handled correctly and the most convenient way is to crash {@link Flowable#map} that won't fuse with {@code BOUNDARY}
* flag. This transformer strips this flag and thus allows the function of {@code map} to be executed as part of the
* {@code poll()} chain.
* @param <T> the element type of the flow
* @return the new Transformer instance
*/
public static <T> FlowableTransformer<T, T> flowableStripBoundary() {
return new FlowableStripBoundary<>(null);
}
static final
|
StripBoundarySubscriber
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
|
{
"start": 21752,
"end": 22154
}
|
class ____ implements SyntheticSourceSupport {
@Override
public SyntheticSourceMode mode() {
return SyntheticSourceMode.FALLBACK;
}
@Override
public SourceLoader.SyntheticFieldLoader loader() {
return null;
}
}
SyntheticSourceSupport FALLBACK = new Fallback();
final
|
Fallback
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapper.java
|
{
"start": 3997,
"end": 4206
}
|
enum ____ {
UTF_16(StandardCharsets.UTF_16);
private Charset charSet;
CharsetFormat(Charset charSet) {
this.charSet = charSet;
}
}
public static
|
CharsetFormat
|
java
|
quarkusio__quarkus
|
extensions/hibernate-reactive/deployment/src/test/java/io/quarkus/hibernate/reactive/entities/Hero.java
|
{
"start": 206,
"end": 519
}
|
class ____ {
@jakarta.persistence.Id
@jakarta.persistence.GeneratedValue
public java.lang.Long id;
@Column(unique = true)
public String name;
public String otherName;
public int level;
public String picture;
@Column(columnDefinition = "TEXT")
public String powers;
}
|
Hero
|
java
|
google__auto
|
service/processor/src/test/resources/test/EnclosingGeneric.java
|
{
"start": 786,
"end": 848
}
|
class ____ {
/**
* This is technically a raw
|
EnclosingGeneric
|
java
|
apache__camel
|
core/camel-management/src/main/java/org/apache/camel/management/mbean/LoadTriplet.java
|
{
"start": 1043,
"end": 2966
}
|
class ____ {
// Exponents for EWMA: exp(-INTERVAL / WINDOW) (in seconds)
private static final double EXP_1 = Math.exp(-1 / (60.0));
private static final double EXP_5 = Math.exp(-1 / (60.0 * 5.0));
private static final double EXP_15 = Math.exp(-1 / (60.0 * 15.0));
private static final Lock LOCK = new ReentrantLock();
private double load01 = Double.NaN;
private double load05 = Double.NaN;
private double load15 = Double.NaN;
/**
* Update the load statistics
*
* @param currentReading the current reading
*/
public void update(int currentReading) {
LOCK.lock();
try {
load01 = updateLoad(currentReading, EXP_1, load01);
load05 = updateLoad(currentReading, EXP_5, load05);
load15 = updateLoad(currentReading, EXP_15, load15);
} finally {
LOCK.unlock();
}
}
private double updateLoad(int reading, double exp, double recentLoad) {
return Double.isNaN(recentLoad) ? reading : reading + exp * (recentLoad - reading);
}
public double getLoad1() {
LOCK.lock();
try {
return load01;
} finally {
LOCK.unlock();
}
}
public double getLoad5() {
LOCK.lock();
try {
return load05;
} finally {
LOCK.unlock();
}
}
public double getLoad15() {
LOCK.lock();
try {
return load15;
} finally {
LOCK.unlock();
}
}
public void reset() {
LOCK.lock();
try {
load01 = Double.NaN;
load05 = Double.NaN;
load15 = Double.NaN;
} finally {
LOCK.unlock();
}
}
@Override
public String toString() {
return String.format("%.2f, %.2f, %.2f", getLoad1(), getLoad5(), getLoad15());
}
}
|
LoadTriplet
|
java
|
grpc__grpc-java
|
xds/src/test/java/io/grpc/xds/RbacFilterTest.java
|
{
"start": 3207,
"end": 22453
}
|
class ____ {
private static final String PATH = "auth";
private static final StringMatcher STRING_MATCHER =
StringMatcher.newBuilder().setExact("/" + PATH).setIgnoreCase(true).build();
private static final RbacFilter.Provider FILTER_PROVIDER = new RbacFilter.Provider();
private final String name = "theFilterName";
@Test
public void filterType_serverOnly() {
assertThat(FILTER_PROVIDER.isClientFilter()).isFalse();
assertThat(FILTER_PROVIDER.isServerFilter()).isTrue();
}
@Test
@SuppressWarnings({"unchecked", "deprecation"})
public void ipPortParser() {
CidrRange cidrRange = CidrRange.newBuilder().setAddressPrefix("10.10.10.0")
.setPrefixLen(UInt32Value.of(24)).build();
List<Permission> permissionList = Arrays.asList(
Permission.newBuilder().setAndRules(Permission.Set.newBuilder()
.addRules(Permission.newBuilder().setDestinationIp(cidrRange).build())
.addRules(Permission.newBuilder().setDestinationPort(9090).build()).build()
).build());
List<Principal> principalList = Arrays.asList(
Principal.newBuilder().setAndIds(Principal.Set.newBuilder()
.addIds(Principal.newBuilder().setDirectRemoteIp(cidrRange).build())
.addIds(Principal.newBuilder().setRemoteIp(cidrRange).build())
.addIds(Principal.newBuilder().setSourceIp(cidrRange).build())
.build()).build());
ConfigOrError<?> result = parseRaw(permissionList, principalList);
assertThat(result.errorDetail).isNull();
ServerCall<Void,Void> serverCall = mock(ServerCall.class);
Attributes attributes = Attributes.newBuilder()
.set(Grpc.TRANSPORT_ATTR_REMOTE_ADDR, new InetSocketAddress("10.10.10.0", 1))
.set(Grpc.TRANSPORT_ATTR_LOCAL_ADDR, new InetSocketAddress("10.10.10.0",9090))
.build();
when(serverCall.getAttributes()).thenReturn(attributes);
when(serverCall.getMethodDescriptor()).thenReturn(method().build());
GrpcAuthorizationEngine engine =
new GrpcAuthorizationEngine(((RbacConfig)result.config).authConfig());
AuthDecision decision = engine.evaluate(new Metadata(), serverCall);
assertThat(decision.decision()).isEqualTo(GrpcAuthorizationEngine.Action.DENY);
}
@Test
@SuppressWarnings({"unchecked", "deprecation"})
public void portRangeParser() {
List<Permission> permissionList = Arrays.asList(
Permission.newBuilder().setDestinationPortRange(
Int32Range.newBuilder().setStart(1010).setEnd(65535).build()
).build());
List<Principal> principalList = Arrays.asList(
Principal.newBuilder().setRemoteIp(
CidrRange.newBuilder().setAddressPrefix("10.10.10.0")
.setPrefixLen(UInt32Value.of(24)).build()
).build());
ConfigOrError<?> result = parse(permissionList, principalList);
assertThat(result.errorDetail).isNull();
ServerCall<Void,Void> serverCall = mock(ServerCall.class);
Attributes attributes = Attributes.newBuilder()
.set(Grpc.TRANSPORT_ATTR_REMOTE_ADDR, new InetSocketAddress("10.10.10.0", 1))
.set(Grpc.TRANSPORT_ATTR_LOCAL_ADDR, new InetSocketAddress("10.10.10.0",9090))
.build();
when(serverCall.getAttributes()).thenReturn(attributes);
when(serverCall.getMethodDescriptor()).thenReturn(method().build());
GrpcAuthorizationEngine engine =
new GrpcAuthorizationEngine(((RbacConfig)result.config).authConfig());
AuthDecision decision = engine.evaluate(new Metadata(), serverCall);
assertThat(decision.decision()).isEqualTo(GrpcAuthorizationEngine.Action.DENY);
}
@Test
@SuppressWarnings("unchecked")
public void pathParser() {
PathMatcher pathMatcher = PathMatcher.newBuilder().setPath(STRING_MATCHER).build();
List<Permission> permissionList = Arrays.asList(
Permission.newBuilder().setUrlPath(pathMatcher).build());
List<Principal> principalList = Arrays.asList(
Principal.newBuilder().setUrlPath(pathMatcher).build());
ConfigOrError<RbacConfig> result = parse(permissionList, principalList);
assertThat(result.errorDetail).isNull();
ServerCall<Void,Void> serverCall = mock(ServerCall.class);
when(serverCall.getMethodDescriptor()).thenReturn(method().build());
GrpcAuthorizationEngine engine =
new GrpcAuthorizationEngine(result.config.authConfig());
AuthDecision decision = engine.evaluate(new Metadata(), serverCall);
assertThat(decision.decision()).isEqualTo(GrpcAuthorizationEngine.Action.DENY);
}
@Test
@SuppressWarnings("unchecked")
public void authenticatedParser() throws Exception {
List<Permission> permissionList = Arrays.asList(
Permission.newBuilder().setNotRule(
Permission.newBuilder().setRequestedServerName(STRING_MATCHER).build()).build());
List<Principal> principalList = Arrays.asList(
Principal.newBuilder().setAuthenticated(Authenticated.newBuilder()
.setPrincipalName(STRING_MATCHER).build()).build());
ConfigOrError<?> result = parse(permissionList, principalList);
assertThat(result.errorDetail).isNull();
SSLSession sslSession = mock(SSLSession.class);
X509Certificate mockCert = mock(X509Certificate.class);
when(sslSession.getPeerCertificates()).thenReturn(new X509Certificate[]{mockCert});
when(mockCert.getSubjectAlternativeNames()).thenReturn(
Arrays.<List<?>>asList(Arrays.asList(2, "/" + PATH)));
Attributes attributes = Attributes.newBuilder()
.set(Grpc.TRANSPORT_ATTR_SSL_SESSION, sslSession)
.build();
ServerCall<Void,Void> serverCall = mock(ServerCall.class);
when(serverCall.getAttributes()).thenReturn(attributes);
GrpcAuthorizationEngine engine =
new GrpcAuthorizationEngine(((RbacConfig)result.config).authConfig());
AuthDecision decision = engine.evaluate(new Metadata(), serverCall);
assertThat(decision.decision()).isEqualTo(GrpcAuthorizationEngine.Action.DENY);
}
@Test
@SuppressWarnings({"unchecked", "deprecation"})
public void headerParser() {
HeaderMatcher headerMatcher = HeaderMatcher.newBuilder()
.setName("party").setExactMatch("win").build();
List<Permission> permissionList = Arrays.asList(
Permission.newBuilder().setHeader(headerMatcher).build());
List<Principal> principalList = Arrays.asList(
Principal.newBuilder().setHeader(headerMatcher).build());
ConfigOrError<RbacConfig> result = parseOverride(permissionList, principalList);
assertThat(result.errorDetail).isNull();
ServerCall<Void,Void> serverCall = mock(ServerCall.class);
GrpcAuthorizationEngine engine =
new GrpcAuthorizationEngine(result.config.authConfig());
AuthDecision decision = engine.evaluate(metadata("party", "win"), serverCall);
assertThat(decision.decision()).isEqualTo(GrpcAuthorizationEngine.Action.DENY);
}
@Test
@SuppressWarnings("deprecation")
public void headerParser_headerName() {
HeaderMatcher headerMatcher = HeaderMatcher.newBuilder()
.setName("grpc--feature").setExactMatch("win").build();
List<Permission> permissionList = Arrays.asList(
Permission.newBuilder().setHeader(headerMatcher).build());
HeaderMatcher headerMatcher2 = HeaderMatcher.newBuilder()
.setName(":scheme").setExactMatch("win").build();
List<Principal> principalList = Arrays.asList(
Principal.newBuilder().setHeader(headerMatcher2).build());
ConfigOrError<RbacConfig> result = parseOverride(permissionList, principalList);
assertThat(result.errorDetail).isNotNull();
}
@Test
@SuppressWarnings("unchecked")
public void compositeRules() {
MetadataMatcher metadataMatcher = MetadataMatcher.newBuilder().build();
@SuppressWarnings("deprecation")
Permission permissionMetadata = Permission.newBuilder().setMetadata(metadataMatcher).build();
List<Permission> permissionList = Arrays.asList(
Permission.newBuilder().setOrRules(Permission.Set.newBuilder().addRules(
permissionMetadata).build()).build());
@SuppressWarnings("deprecation")
Principal principalMetadata = Principal.newBuilder().setMetadata(metadataMatcher).build();
List<Principal> principalList = Arrays.asList(
Principal.newBuilder().setNotId(principalMetadata).build());
ConfigOrError<? extends FilterConfig> result = parse(permissionList, principalList);
assertThat(result.errorDetail).isNull();
assertThat(result.config).isInstanceOf(RbacConfig.class);
ServerCall<Void,Void> serverCall = mock(ServerCall.class);
GrpcAuthorizationEngine engine =
new GrpcAuthorizationEngine(((RbacConfig)result.config).authConfig());
AuthDecision decision = engine.evaluate(new Metadata(), serverCall);
assertThat(decision.decision()).isEqualTo(GrpcAuthorizationEngine.Action.ALLOW);
}
@SuppressWarnings("unchecked")
@Test
public void testAuthorizationInterceptor() {
ServerCallHandler<Void, Void> mockHandler = mock(ServerCallHandler.class);
ServerCall<Void, Void> mockServerCall = mock(ServerCall.class);
Attributes attr = Attributes.newBuilder()
.set(Grpc.TRANSPORT_ATTR_LOCAL_ADDR, new InetSocketAddress("1::", 20))
.build();
when(mockServerCall.getAttributes()).thenReturn(attr);
PolicyMatcher policyMatcher = PolicyMatcher.create("policy-matcher",
OrMatcher.create(DestinationPortMatcher.create(99999)),
OrMatcher.create(AlwaysTrueMatcher.INSTANCE));
AuthConfig authconfig = AuthConfig.create(Collections.singletonList(policyMatcher),
GrpcAuthorizationEngine.Action.ALLOW);
FILTER_PROVIDER.newInstance(name).buildServerInterceptor(RbacConfig.create(authconfig), null)
.interceptCall(mockServerCall, new Metadata(), mockHandler);
verify(mockHandler, never()).startCall(eq(mockServerCall), any(Metadata.class));
ArgumentCaptor<Status> captor = ArgumentCaptor.forClass(Status.class);
verify(mockServerCall).close(captor.capture(), any(Metadata.class));
assertThat(captor.getValue().getCode()).isEqualTo(Status.PERMISSION_DENIED.getCode());
assertThat(captor.getValue().getDescription()).isEqualTo("Access Denied");
verify(mockServerCall).getAttributes();
verifyNoMoreInteractions(mockServerCall);
authconfig = AuthConfig.create(Collections.singletonList(policyMatcher),
GrpcAuthorizationEngine.Action.DENY);
FILTER_PROVIDER.newInstance(name).buildServerInterceptor(RbacConfig.create(authconfig), null)
.interceptCall(mockServerCall, new Metadata(), mockHandler);
verify(mockHandler).startCall(eq(mockServerCall), any(Metadata.class));
}
@Test
public void handleException() {
PathMatcher pathMatcher = PathMatcher.newBuilder()
.setPath(StringMatcher.newBuilder().build()).build();
List<Permission> permissionList = Arrays.asList(
Permission.newBuilder().setUrlPath(pathMatcher).build());
List<Principal> principalList = Arrays.asList(
Principal.newBuilder().setUrlPath(pathMatcher).build());
ConfigOrError<?> result = parse(permissionList, principalList);
assertThat(result.errorDetail).isNotNull();
permissionList = Arrays.asList(Permission.newBuilder().build());
principalList = Arrays.asList(Principal.newBuilder().build());
result = parse(permissionList, principalList);
assertThat(result.errorDetail).isNotNull();
Message rawProto = io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC.newBuilder()
.setRules(RBAC.newBuilder().setAction(Action.DENY)
.putPolicies("policy-name",
Policy.newBuilder().setCondition(Expr.newBuilder().build()).build())
.build()).build();
result = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto));
assertThat(result.errorDetail).isNotNull();
}
@Test
@SuppressWarnings("unchecked")
public void overrideConfig() {
ServerCallHandler<Void, Void> mockHandler = mock(ServerCallHandler.class);
ServerCall<Void, Void> mockServerCall = mock(ServerCall.class);
Attributes attr = Attributes.newBuilder()
.set(Grpc.TRANSPORT_ATTR_LOCAL_ADDR, new InetSocketAddress("1::", 20))
.build();
when(mockServerCall.getAttributes()).thenReturn(attr);
PolicyMatcher policyMatcher = PolicyMatcher.create("policy-matcher",
OrMatcher.create(DestinationPortMatcher.create(99999)),
OrMatcher.create(AlwaysTrueMatcher.INSTANCE));
AuthConfig authconfig = AuthConfig.create(Collections.singletonList(policyMatcher),
GrpcAuthorizationEngine.Action.ALLOW);
RbacConfig original = RbacConfig.create(authconfig);
RBACPerRoute rbacPerRoute = RBACPerRoute.newBuilder().build();
RbacConfig override = FILTER_PROVIDER.parseFilterConfigOverride(Any.pack(rbacPerRoute)).config;
assertThat(override).isEqualTo(RbacConfig.create(null));
ServerInterceptor interceptor =
FILTER_PROVIDER.newInstance(name).buildServerInterceptor(original, override);
assertThat(interceptor).isNull();
policyMatcher = PolicyMatcher.create("policy-matcher-override",
OrMatcher.create(DestinationPortMatcher.create(20)),
OrMatcher.create(AlwaysTrueMatcher.INSTANCE));
authconfig = AuthConfig.create(Collections.singletonList(policyMatcher),
GrpcAuthorizationEngine.Action.ALLOW);
override = RbacConfig.create(authconfig);
FILTER_PROVIDER.newInstance(name).buildServerInterceptor(original, override)
.interceptCall(mockServerCall, new Metadata(), mockHandler);
verify(mockHandler).startCall(eq(mockServerCall), any(Metadata.class));
verify(mockServerCall).getAttributes();
verifyNoMoreInteractions(mockServerCall);
}
@Test
public void ignoredConfig() {
Message rawProto = io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC.newBuilder()
.setRules(RBAC.newBuilder().setAction(Action.LOG)
.putPolicies("policy-name", Policy.newBuilder().build()).build()).build();
ConfigOrError<RbacConfig> result = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto));
assertThat(result.config).isEqualTo(RbacConfig.create(null));
}
@Test
public void testOrderIndependenceOfPolicies() {
Message rawProto = buildComplexRbac(ImmutableList.of(1, 2, 3, 4, 5, 6), true);
ConfigOrError<RbacConfig> ascFirst = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto));
rawProto = buildComplexRbac(ImmutableList.of(1, 2, 3, 4, 5, 6), false);
ConfigOrError<RbacConfig> ascLast = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto));
assertThat(ascFirst.config).isEqualTo(ascLast.config);
rawProto = buildComplexRbac(ImmutableList.of(6, 5, 4, 3, 2, 1), true);
ConfigOrError<RbacConfig> decFirst = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto));
assertThat(ascFirst.config).isEqualTo(decFirst.config);
}
private static Metadata metadata(String key, String value) {
Metadata metadata = new Metadata();
metadata.put(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER), value);
return metadata;
}
private MethodDescriptor.Builder<Void, Void> method() {
return MethodDescriptor.<Void,Void>newBuilder()
.setType(MethodType.BIDI_STREAMING)
.setFullMethodName(PATH)
.setRequestMarshaller(TestMethodDescriptors.voidMarshaller())
.setResponseMarshaller(TestMethodDescriptors.voidMarshaller());
}
private ConfigOrError<RbacConfig> parse(List<Permission> permissionList,
List<Principal> principalList) {
return RbacFilter.Provider.parseRbacConfig(buildRbac(permissionList, principalList));
}
private ConfigOrError<RbacConfig> parseRaw(List<Permission> permissionList,
List<Principal> principalList) {
Message rawProto = buildRbac(permissionList, principalList);
Any proto = Any.pack(rawProto);
return FILTER_PROVIDER.parseFilterConfig(proto);
}
private io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC buildRbac(
List<Permission> permissionList, List<Principal> principalList) {
return io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC.newBuilder()
.setRules(buildRbacRule("policy-name", Action.DENY,
permissionList, principalList)).build();
}
private static RBAC buildRbacRule(String policyName, Action action,
List<Permission> permissionList, List<Principal> principalList) {
return RBAC.newBuilder().setAction(action)
.putPolicies(policyName, Policy.newBuilder()
.addAllPermissions(permissionList)
.addAllPrincipals(principalList).build())
.build();
}
private io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC buildComplexRbac(
List<Integer> ids, boolean listsFirst) {
Policy policy1 = createSimplePolicyUsingLists(0);
RBAC.Builder ruleBuilder = RBAC.newBuilder().setAction(Action.DENY);
if (listsFirst) {
ruleBuilder.putPolicies("list-policy", policy1);
}
String base = "filterConfig\\u003dRbacConfig{authConfig\\u003dAuthConfig{policies\\u003d[Poli"
+ "cyMatcher{name\\u003dpsm-interop-authz-policy-20230514-0917-er2uh_td_rbac_rule_";
for (Integer id : ids) {
ruleBuilder.putPolicies(base + id, createSimplePolicyUsingLists(id));
}
if (!listsFirst) {
ruleBuilder.putPolicies("list-policy", policy1);
}
return io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC.newBuilder()
.setRules(ruleBuilder.build()).build();
}
private static Policy createSimplePolicyUsingLists(int id) {
CidrRange cidrRange = CidrRange.newBuilder().setAddressPrefix("10.10." + id + ".0")
.setPrefixLen(UInt32Value.of(24)).build();
List<Permission> permissionList = Arrays.asList(
Permission.newBuilder().setAndRules(Permission.Set.newBuilder()
.addRules(Permission.newBuilder().setDestinationIp(cidrRange).build())
.addRules(Permission.newBuilder().setDestinationPort(9090).build()).build()
).build());
List<Principal> principalList = Arrays.asList(
Principal.newBuilder().setAndIds(Principal.Set.newBuilder()
.addIds(Principal.newBuilder().setDirectRemoteIp(cidrRange).build())
.addIds(Principal.newBuilder().setRemoteIp(cidrRange).build())
.build()).build());
return Policy.newBuilder()
.addAllPermissions(permissionList)
.addAllPrincipals(principalList).build();
}
private ConfigOrError<RbacConfig> parseOverride(List<Permission> permissionList,
List<Principal> principalList) {
RBACPerRoute rbacPerRoute = RBACPerRoute.newBuilder().setRbac(
buildRbac(permissionList, principalList)).build();
Any proto = Any.pack(rbacPerRoute);
return FILTER_PROVIDER.parseFilterConfigOverride(proto);
}
}
|
RbacFilterTest
|
java
|
grpc__grpc-java
|
benchmarks/src/generated/main/grpc/io/grpc/benchmarks/proto/BenchmarkServiceGrpc.java
|
{
"start": 160,
"end": 13244
}
|
class ____ {
private BenchmarkServiceGrpc() {}
public static final java.lang.String SERVICE_NAME = "grpc.testing.BenchmarkService";
// Static method descriptors that strictly reflect the proto.
private static volatile io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> getUnaryCallMethod;
@io.grpc.stub.annotations.RpcMethod(
fullMethodName = SERVICE_NAME + '/' + "UnaryCall",
requestType = io.grpc.benchmarks.proto.Messages.SimpleRequest.class,
responseType = io.grpc.benchmarks.proto.Messages.SimpleResponse.class,
methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
public static io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> getUnaryCallMethod() {
io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse> getUnaryCallMethod;
if ((getUnaryCallMethod = BenchmarkServiceGrpc.getUnaryCallMethod) == null) {
synchronized (BenchmarkServiceGrpc.class) {
if ((getUnaryCallMethod = BenchmarkServiceGrpc.getUnaryCallMethod) == null) {
BenchmarkServiceGrpc.getUnaryCallMethod = getUnaryCallMethod =
io.grpc.MethodDescriptor.<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.UNARY)
.setFullMethodName(generateFullMethodName(SERVICE_NAME, "UnaryCall"))
.setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleRequest.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleResponse.getDefaultInstance()))
.setSchemaDescriptor(new BenchmarkServiceMethodDescriptorSupplier("UnaryCall"))
.build();
}
}
}
return getUnaryCallMethod;
}
private static volatile io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> getStreamingCallMethod;
@io.grpc.stub.annotations.RpcMethod(
fullMethodName = SERVICE_NAME + '/' + "StreamingCall",
requestType = io.grpc.benchmarks.proto.Messages.SimpleRequest.class,
responseType = io.grpc.benchmarks.proto.Messages.SimpleResponse.class,
methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
public static io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> getStreamingCallMethod() {
io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse> getStreamingCallMethod;
if ((getStreamingCallMethod = BenchmarkServiceGrpc.getStreamingCallMethod) == null) {
synchronized (BenchmarkServiceGrpc.class) {
if ((getStreamingCallMethod = BenchmarkServiceGrpc.getStreamingCallMethod) == null) {
BenchmarkServiceGrpc.getStreamingCallMethod = getStreamingCallMethod =
io.grpc.MethodDescriptor.<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
.setFullMethodName(generateFullMethodName(SERVICE_NAME, "StreamingCall"))
.setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleRequest.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleResponse.getDefaultInstance()))
.setSchemaDescriptor(new BenchmarkServiceMethodDescriptorSupplier("StreamingCall"))
.build();
}
}
}
return getStreamingCallMethod;
}
private static volatile io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> getStreamingFromClientMethod;
@io.grpc.stub.annotations.RpcMethod(
fullMethodName = SERVICE_NAME + '/' + "StreamingFromClient",
requestType = io.grpc.benchmarks.proto.Messages.SimpleRequest.class,
responseType = io.grpc.benchmarks.proto.Messages.SimpleResponse.class,
methodType = io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING)
public static io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> getStreamingFromClientMethod() {
io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse> getStreamingFromClientMethod;
if ((getStreamingFromClientMethod = BenchmarkServiceGrpc.getStreamingFromClientMethod) == null) {
synchronized (BenchmarkServiceGrpc.class) {
if ((getStreamingFromClientMethod = BenchmarkServiceGrpc.getStreamingFromClientMethod) == null) {
BenchmarkServiceGrpc.getStreamingFromClientMethod = getStreamingFromClientMethod =
io.grpc.MethodDescriptor.<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING)
.setFullMethodName(generateFullMethodName(SERVICE_NAME, "StreamingFromClient"))
.setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleRequest.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleResponse.getDefaultInstance()))
.setSchemaDescriptor(new BenchmarkServiceMethodDescriptorSupplier("StreamingFromClient"))
.build();
}
}
}
return getStreamingFromClientMethod;
}
private static volatile io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> getStreamingFromServerMethod;
@io.grpc.stub.annotations.RpcMethod(
fullMethodName = SERVICE_NAME + '/' + "StreamingFromServer",
requestType = io.grpc.benchmarks.proto.Messages.SimpleRequest.class,
responseType = io.grpc.benchmarks.proto.Messages.SimpleResponse.class,
methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING)
public static io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> getStreamingFromServerMethod() {
io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse> getStreamingFromServerMethod;
if ((getStreamingFromServerMethod = BenchmarkServiceGrpc.getStreamingFromServerMethod) == null) {
synchronized (BenchmarkServiceGrpc.class) {
if ((getStreamingFromServerMethod = BenchmarkServiceGrpc.getStreamingFromServerMethod) == null) {
BenchmarkServiceGrpc.getStreamingFromServerMethod = getStreamingFromServerMethod =
io.grpc.MethodDescriptor.<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING)
.setFullMethodName(generateFullMethodName(SERVICE_NAME, "StreamingFromServer"))
.setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleRequest.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleResponse.getDefaultInstance()))
.setSchemaDescriptor(new BenchmarkServiceMethodDescriptorSupplier("StreamingFromServer"))
.build();
}
}
}
return getStreamingFromServerMethod;
}
private static volatile io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> getStreamingBothWaysMethod;
@io.grpc.stub.annotations.RpcMethod(
fullMethodName = SERVICE_NAME + '/' + "StreamingBothWays",
requestType = io.grpc.benchmarks.proto.Messages.SimpleRequest.class,
responseType = io.grpc.benchmarks.proto.Messages.SimpleResponse.class,
methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
public static io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
io.grpc.benchmarks.proto.Messages.SimpleResponse> getStreamingBothWaysMethod() {
io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse> getStreamingBothWaysMethod;
if ((getStreamingBothWaysMethod = BenchmarkServiceGrpc.getStreamingBothWaysMethod) == null) {
synchronized (BenchmarkServiceGrpc.class) {
if ((getStreamingBothWaysMethod = BenchmarkServiceGrpc.getStreamingBothWaysMethod) == null) {
BenchmarkServiceGrpc.getStreamingBothWaysMethod = getStreamingBothWaysMethod =
io.grpc.MethodDescriptor.<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto.Messages.SimpleResponse>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
.setFullMethodName(generateFullMethodName(SERVICE_NAME, "StreamingBothWays"))
.setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleRequest.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.grpc.benchmarks.proto.Messages.SimpleResponse.getDefaultInstance()))
.setSchemaDescriptor(new BenchmarkServiceMethodDescriptorSupplier("StreamingBothWays"))
.build();
}
}
}
return getStreamingBothWaysMethod;
}
/**
* Creates a new async stub that supports all call types for the service
*/
public static BenchmarkServiceStub newStub(io.grpc.Channel channel) {
io.grpc.stub.AbstractStub.StubFactory<BenchmarkServiceStub> factory =
new io.grpc.stub.AbstractStub.StubFactory<BenchmarkServiceStub>() {
@java.lang.Override
public BenchmarkServiceStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
return new BenchmarkServiceStub(channel, callOptions);
}
};
return BenchmarkServiceStub.newStub(factory, channel);
}
/**
* Creates a new blocking-style stub that supports all types of calls on the service
*/
public static BenchmarkServiceBlockingV2Stub newBlockingV2Stub(
io.grpc.Channel channel) {
io.grpc.stub.AbstractStub.StubFactory<BenchmarkServiceBlockingV2Stub> factory =
new io.grpc.stub.AbstractStub.StubFactory<BenchmarkServiceBlockingV2Stub>() {
@java.lang.Override
public BenchmarkServiceBlockingV2Stub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
return new BenchmarkServiceBlockingV2Stub(channel, callOptions);
}
};
return BenchmarkServiceBlockingV2Stub.newStub(factory, channel);
}
/**
* Creates a new blocking-style stub that supports unary and streaming output calls on the service
*/
public static BenchmarkServiceBlockingStub newBlockingStub(
io.grpc.Channel channel) {
io.grpc.stub.AbstractStub.StubFactory<BenchmarkServiceBlockingStub> factory =
new io.grpc.stub.AbstractStub.StubFactory<BenchmarkServiceBlockingStub>() {
@java.lang.Override
public BenchmarkServiceBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
return new BenchmarkServiceBlockingStub(channel, callOptions);
}
};
return BenchmarkServiceBlockingStub.newStub(factory, channel);
}
/**
* Creates a new ListenableFuture-style stub that supports unary calls on the service
*/
public static BenchmarkServiceFutureStub newFutureStub(
io.grpc.Channel channel) {
io.grpc.stub.AbstractStub.StubFactory<BenchmarkServiceFutureStub> factory =
new io.grpc.stub.AbstractStub.StubFactory<BenchmarkServiceFutureStub>() {
@java.lang.Override
public BenchmarkServiceFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
return new BenchmarkServiceFutureStub(channel, callOptions);
}
};
return BenchmarkServiceFutureStub.newStub(factory, channel);
}
/**
*/
public
|
BenchmarkServiceGrpc
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
|
{
"start": 9965,
"end": 11250
}
|
class ____ 42:3 parent 42:1 prio 0 rate 7000Kbit ceil 7000Kbit burst 1598b cburst 1598b
BatchBuilder builder = new BatchBuilder(PrivilegedOperation.
OperationType.TC_READ_STATE)
.readState();
PrivilegedOperation op = builder.commitBatchToTempFile();
try {
String output =
privilegedOperationExecutor.executePrivilegedOperation(op, true);
LOG.debug("TC state: {}" + output);
return output;
} catch (PrivilegedOperationException e) {
LOG.warn("Failed to bootstrap outbound bandwidth rules");
throw new ResourceHandlerException(
"Failed to bootstrap outbound bandwidth rules", e);
}
}
private void wipeState() throws ResourceHandlerException {
BatchBuilder builder = new BatchBuilder(PrivilegedOperation.
OperationType.TC_MODIFY_STATE)
.wipeState();
PrivilegedOperation op = builder.commitBatchToTempFile();
try {
LOG.info("Wiping tc state.");
privilegedOperationExecutor.executePrivilegedOperation(op, false);
} catch (PrivilegedOperationException e) {
LOG.warn("Failed to wipe tc state. This could happen if the interface" +
" is already in its default state. Ignoring.");
//Ignoring this exception. This could happen if the
|
htb
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java
|
{
"start": 1390,
"end": 3329
}
|
/**
 * Cat API endpoint that lists the snapshot repositories registered in the
 * cluster: one row per repository, with its id and type.
 */
class ____ extends AbstractCatAction {

    @Override
    public List<Route> routes() {
        return List.of(new Route(GET, "/_cat/repositories"));
    }

    @Override
    protected RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) {
        final GetRepositoriesRequest repositoriesRequest = new GetRepositoriesRequest(
            getMasterNodeTimeout(request),
            Strings.EMPTY_ARRAY
        );
        repositoriesRequest.local(request.paramAsBoolean("local", repositoriesRequest.local()));
        return channel -> {
            // Render the repository list as a cat table once the response arrives.
            final RestResponseListener<GetRepositoriesResponse> listener = new RestResponseListener<>(channel) {
                @Override
                public RestResponse buildResponse(GetRepositoriesResponse response) throws Exception {
                    return RestTable.buildResponse(buildTable(request, response), channel);
                }
            };
            client.admin().cluster().getRepositories(repositoriesRequest, listener);
        };
    }

    @Override
    public String getName() {
        return "cat_repositories_action";
    }

    @Override
    protected void documentation(StringBuilder sb) {
        sb.append("/_cat/repositories\n");
    }

    @Override
    protected Table getTableWithHeader(RestRequest request) {
        final Table table = new Table();
        table.startHeaders();
        table.addCell("id", "alias:id,repoId;desc:unique repository id");
        table.addCell("type", "alias:t,type;text-align:right;desc:repository type");
        table.endHeaders();
        return table;
    }

    // One row per registered repository: [id, type].
    private Table buildTable(RestRequest req, GetRepositoriesResponse response) {
        final Table table = getTableWithHeader(req);
        for (RepositoryMetadata repository : response.repositories()) {
            table.startRow();
            table.addCell(repository.name());
            table.addCell(repository.type());
            table.endRow();
        }
        return table;
    }
}
|
RestRepositoriesAction
|
java
|
junit-team__junit5
|
junit-jupiter-params/src/main/java/org/junit/jupiter/params/AfterParameterizedClassInvocation.java
|
{
"start": 4991,
"end": 5212
}
|
class ____ implements the interface.
*
* <p>JUnit Jupiter does not guarantee the execution order of multiple
* {@code @AfterParameterizedClassInvocation} methods that are declared within a
* single parameterized test
|
that
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/scheduling/annotation/EnableAsyncTests.java
|
{
"start": 14813,
"end": 14934
}
|
// AsyncBean variant that additionally implements Runnable — presumably used to
// exercise async proxying of a bean exposing a JDK interface; TODO confirm at call site.
class ____ extends AsyncBean implements Runnable {
    @Override
    public void run() {
        // intentionally empty: only the presence of the Runnable interface matters here
    }
}
static
|
AsyncBeanWithInterface
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/common/settings/SettingsUpdaterTests.java
|
{
"start": 29477,
"end": 30202
}
|
/**
 * Validator for the "low" setting: rejects a low value that exceeds the
 * currently configured high value ({@code SETTING_FOO_HIGH}), when present.
 */
class ____ implements Setting.Validator<Integer> {

    @Override
    public void validate(final Integer value) {
        // standalone validation is a no-op; only the cross-setting check matters
    }

    @Override
    public void validate(final Integer low, final Map<Setting<?>, Object> settings) {
        if (settings.containsKey(SETTING_FOO_HIGH)) {
            final Object high = settings.get(SETTING_FOO_HIGH);
            if (low > (int) high) {
                throw new IllegalArgumentException("[low]=" + low + " is higher than [high]=" + high);
            }
        }
    }

    /** Declares the dependency on the high setting so it is passed to validate(). */
    @Override
    public Iterator<Setting<?>> settings() {
        return List.<Setting<?>>of(SETTING_FOO_HIGH).iterator();
    }
}
private static
|
FooLowSettingValidator
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFlags.java
|
{
"start": 809,
"end": 3686
}
|
class ____ {
    /**
     * When {@code true}, LIKE on string fields may be evaluated on the index —
     * presumably pushed down to Lucene rather than computed row-by-row;
     * TODO confirm against the consumers of {@link #stringLikeOnIndex()}.
     */
    public static final Setting<Boolean> ESQL_STRING_LIKE_ON_INDEX = Setting.boolSetting(
        "esql.query.string_like_on_index",
        true,
        Setting.Property.NodeScope,
        Setting.Property.Dynamic
    );
    /**
     * The maximum number of rounding points to push down to Lucene for the {@code roundTo} function at cluster level.
     * {@code ReplaceRoundToWithQueryAndTags} checks this threshold before rewriting {@code RoundTo} to range queries.
     *
     * There is also a query level ROUNDTO_PUSHDOWN_THRESHOLD defined in {@code QueryPragmas}.
     * The cluster level threshold defaults to 127, it is the same as the maximum number of buckets used in {@code Rounding}.
     * The query level threshold defaults to -1, which means this query level setting is not set and cluster level upper limit will be used.
     * If query level threshold is set to greater than or equals to 0, the query level threshold will be used, and it overrides the cluster
     * level threshold.
     *
     * If the cluster level threshold is set to -1 or 0, no {@code RoundTo} pushdown will be performed, query level threshold is not set to
     * -1 or 0.
     */
    public static final Setting<Integer> ESQL_ROUNDTO_PUSHDOWN_THRESHOLD = Setting.intSetting(
        "esql.query.roundto_pushdown_threshold",
        127,
        -1,
        Setting.Property.NodeScope,
        Setting.Property.Dynamic
    );
    // this is only used for testing purposes right now
    public static List<Setting<?>> ALL_ESQL_FLAGS_SETTINGS = List.of(ESQL_STRING_LIKE_ON_INDEX, ESQL_ROUNDTO_PUSHDOWN_THRESHOLD);
    // Immutable snapshots of the two flag settings, captured at construction time.
    private final boolean stringLikeOnIndex;
    private final int roundToPushdownThreshold;
    /**
     * Constructor for tests.
     * Fixes the LIKE flag and takes the compiled default for the roundTo threshold.
     */
    public EsqlFlags(boolean stringLikeOnIndex) {
        this.stringLikeOnIndex = stringLikeOnIndex;
        this.roundToPushdownThreshold = ESQL_ROUNDTO_PUSHDOWN_THRESHOLD.getDefault(Settings.EMPTY);
    }
    /**
     * Constructor for tests.
     * Fixes the roundTo threshold and takes the compiled default for the LIKE flag.
     */
    public EsqlFlags(int roundToPushdownThreshold) {
        this.stringLikeOnIndex = ESQL_STRING_LIKE_ON_INDEX.getDefault(Settings.EMPTY);
        this.roundToPushdownThreshold = roundToPushdownThreshold;
    }
    /**
     * Constructor for tests.
     * Fixes both values explicitly.
     */
    public EsqlFlags(boolean stringLikeOnIndex, int roundToPushdownThreshold) {
        this.stringLikeOnIndex = stringLikeOnIndex;
        this.roundToPushdownThreshold = roundToPushdownThreshold;
    }
    // Production path: reads the current value of both dynamic settings;
    // values are then fixed for the lifetime of this instance.
    public EsqlFlags(ClusterSettings settings) {
        this.stringLikeOnIndex = settings.get(ESQL_STRING_LIKE_ON_INDEX);
        this.roundToPushdownThreshold = settings.get(ESQL_ROUNDTO_PUSHDOWN_THRESHOLD);
    }
    /** @return the captured value of {@link #ESQL_STRING_LIKE_ON_INDEX}. */
    public boolean stringLikeOnIndex() {
        return stringLikeOnIndex;
    }
    /** @return the captured value of {@link #ESQL_ROUNDTO_PUSHDOWN_THRESHOLD}. */
    public int roundToPushdownThreshold() {
        return roundToPushdownThreshold;
    }
}
|
EsqlFlags
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/CheckpointedResultPartition.java
|
{
"start": 1226,
"end": 1747
}
|
// Contract for a result partition whose state can be checkpointed and restored —
// NOTE(review): inferred from the member signatures; confirm against implementations.
interface ____ {
    /** Gets the checkpointed subpartition info with the given subpartitionIndex. */
    ResultSubpartitionInfo getCheckpointedSubpartitionInfo(int subpartitionIndex);
    /**
     * Signals that reading of recovered state has finished.
     * @param notifyAndBlockOnCompletion presumably whether to notify and block
     *        until completion is processed — TODO confirm at the implementation.
     */
    void finishReadRecoveredState(boolean notifyAndBlockOnCompletion) throws IOException;
    /** Requests a BufferBuilder; the name implies the call blocks until one is available. */
    BufferBuilder requestBufferBuilderBlocking()
            throws IOException, RuntimeException, InterruptedException;
    /** Adds a recovered buffer to the subpartition with the given index. */
    void addRecovered(int subpartitionIndex, BufferConsumer bufferConsumer) throws IOException;
}
|
CheckpointedResultPartition
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/engine/internal/SessionEventListenerManagerImpl.java
|
{
"start": 463,
"end": 5855
}
|
/**
 * Standard {@link SessionEventListenerManager} implementation: broadcasts each
 * session-level event to every registered {@link SessionEventListener}.
 * <p>
 * Listeners are kept in a plain array so that the (hot) per-event dispatch is
 * just a null check plus a loop; the array reference is replaced wholesale on
 * registration. NOTE(review): no synchronization is visible here — presumably
 * confined to the owning session's thread; confirm before sharing.
 */
class ____ implements SessionEventListenerManager, Serializable {

	// null means "no listeners registered yet"; every dispatch checks this first
	private SessionEventListener[] listeners;

	public SessionEventListenerManagerImpl(SessionEventListener... initialListener) {
		//no need for defensive copies until the array is mutated:
		listeners = initialListener;
	}

	public SessionEventListenerManagerImpl(List<SessionEventListener> initialListener) {
		//no need for defensive copies until the array is mutated:
		listeners = initialListener.toArray( new SessionEventListener[0] );
	}

	/**
	 * Registers additional listeners, appending them after any already present.
	 * @param additionalListeners the listeners to add; must not be {@code null}
	 */
	@Override
	public void addListener(final SessionEventListener... additionalListeners) {
		requireNonNull( additionalListeners );
		final var existing = listeners;
		if ( existing == null ) {
			//Make a defensive copy as this array can be tracked back to API (user code)
			listeners = copyOf( additionalListeners, additionalListeners.length );
		}
		else {
			// Resize our existing array and add the new listeners
			final var newList = new SessionEventListener[ existing.length + additionalListeners.length ];
			arraycopy( existing, 0, newList, 0, existing.length );
			arraycopy( additionalListeners, 0, newList, existing.length, additionalListeners.length );
			listeners = newList;
		}
	}

	// --- event dispatch: each method forwards the event to all registered listeners ---

	@Override
	public void transactionCompletion(boolean successful) {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.transactionCompletion( successful );
			}
		}
	}

	@Override
	public void jdbcConnectionAcquisitionStart() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.jdbcConnectionAcquisitionStart();
			}
		}
	}

	@Override
	public void jdbcConnectionAcquisitionEnd() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.jdbcConnectionAcquisitionEnd();
			}
		}
	}

	@Override
	public void jdbcConnectionReleaseStart() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.jdbcConnectionReleaseStart();
			}
		}
	}

	@Override
	public void jdbcConnectionReleaseEnd() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.jdbcConnectionReleaseEnd();
			}
		}
	}

	@Override
	public void jdbcPrepareStatementStart() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.jdbcPrepareStatementStart();
			}
		}
	}

	@Override
	public void jdbcPrepareStatementEnd() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.jdbcPrepareStatementEnd();
			}
		}
	}

	@Override
	public void jdbcExecuteStatementStart() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.jdbcExecuteStatementStart();
			}
		}
	}

	@Override
	public void jdbcExecuteStatementEnd() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.jdbcExecuteStatementEnd();
			}
		}
	}

	@Override
	public void jdbcExecuteBatchStart() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.jdbcExecuteBatchStart();
			}
		}
	}

	@Override
	public void jdbcExecuteBatchEnd() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.jdbcExecuteBatchEnd();
			}
		}
	}

	@Override
	public void cachePutStart() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.cachePutStart();
			}
		}
	}

	@Override
	public void cachePutEnd() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.cachePutEnd();
			}
		}
	}

	@Override
	public void cacheGetStart() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.cacheGetStart();
			}
		}
	}

	@Override
	public void cacheGetEnd(boolean hit) {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.cacheGetEnd( hit );
			}
		}
	}

	@Override
	public void flushStart() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.flushStart();
			}
		}
	}

	@Override
	public void flushEnd(int numberOfEntities, int numberOfCollections) {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.flushEnd( numberOfEntities, numberOfCollections );
			}
		}
	}

	@Override
	public void prePartialFlushStart() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.prePartialFlushStart();
			}
		}
	}

	@Override
	public void prePartialFlushEnd() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.prePartialFlushEnd();
			}
		}
	}

	@Override
	public void partialFlushStart() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.partialFlushStart();
			}
		}
	}

	@Override
	public void partialFlushEnd(int numberOfEntities, int numberOfCollections) {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.partialFlushEnd( numberOfEntities, numberOfCollections );
			}
		}
	}

	@Override
	public void dirtyCalculationStart() {
		// aligned with the null-guard style used by every other dispatch method
		// (was the only method using an early return)
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.dirtyCalculationStart();
			}
		}
	}

	@Override
	public void dirtyCalculationEnd(boolean dirty) {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.dirtyCalculationEnd( dirty );
			}
		}
	}

	@Override
	public void end() {
		if ( listeners != null ) {
			for ( var listener : listeners ) {
				listener.end();
			}
		}
	}
}
|
SessionEventListenerManagerImpl
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/impl/TestVectoredReadUtils.java
|
{
"start": 18611,
"end": 29425
}
|
// Empty marker interface combining both read contracts so a single Mockito mock
// can serve positioned byte[] reads and ByteBuffer reads; declares no members.
interface ____ extends PositionedReadable, ByteBufferPositionedReadable {
    // nothing
  }
/**
* Fill a buffer with bytes incremented from 0.
* @param buffer target buffer.
*/
private static void fillBuffer(ByteBuffer buffer) {
byte b = 0;
while (buffer.remaining() > 0) {
buffer.put(b++);
}
}
/**
* Read a single range, verify the future completed and validate the buffer
* returned.
*/
@Test
public void testReadSingleRange() throws Exception {
final Stream stream = mockStreamWithReadFully();
CompletableFuture<ByteBuffer> result =
readRangeFrom(stream, createFileRange(1000, 100),
ByteBuffer::allocate);
assertFutureCompletedSuccessfully(result);
ByteBuffer buffer = result.get();
assertEquals(100, buffer.remaining(), "Size of result buffer");
byte b = 0;
while (buffer.remaining() > 0) {
assertEquals(b++, buffer.get(), "remain = " + buffer.remaining());
}
}
/**
* Read a single range with IOE fault injection; verify the failure
* is reported.
*/
@Test
public void testReadWithIOE() throws Exception {
final Stream stream = mockStreamWithReadFully();
Mockito.doThrow(new IOException("foo"))
.when(stream).readFully(ArgumentMatchers.anyLong(),
ArgumentMatchers.any(ByteBuffer.class));
CompletableFuture<ByteBuffer> result =
readRangeFrom(stream, createFileRange(1000, 100), ByteBuffer::allocate);
assertFutureFailedExceptionally(result);
}
/**
* Read a range, first successfully, then with an IOE.
* the output of the first read is validated.
* @param allocate allocator to use
*/
private static void runReadRangeFromPositionedReadable(IntFunction<ByteBuffer> allocate)
throws Exception {
PositionedReadable stream = Mockito.mock(PositionedReadable.class);
Mockito.doAnswer(invocation -> {
byte b=0;
byte[] buffer = invocation.getArgument(1);
for(int i=0; i < buffer.length; ++i) {
buffer[i] = b++;
}
return null;
}).when(stream).readFully(ArgumentMatchers.anyLong(),
ArgumentMatchers.any(), ArgumentMatchers.anyInt(),
ArgumentMatchers.anyInt());
CompletableFuture<ByteBuffer> result =
readRangeFrom(stream, createFileRange(1000, 100),
allocate);
assertFutureCompletedSuccessfully(result);
ByteBuffer buffer = result.get();
assertEquals(100, buffer.remaining(), "Size of result buffer");
validateBuffer("buffer", buffer, 0);
// test an IOException
Mockito.reset(stream);
Mockito.doThrow(new IOException("foo"))
.when(stream).readFully(ArgumentMatchers.anyLong(),
ArgumentMatchers.any(), ArgumentMatchers.anyInt(),
ArgumentMatchers.anyInt());
result = readRangeFrom(stream, createFileRange(1000, 100),
ByteBuffer::allocate);
assertFutureFailedExceptionally(result);
}
  /**
   * Read into an on heap buffer: exercises the success-then-IOE sequence of
   * {@code runReadRangeFromPositionedReadable} with a heap allocator.
   */
  @Test
  public void testReadRangeArray() throws Exception {
    runReadRangeFromPositionedReadable(ByteBuffer::allocate);
  }
  /**
   * Read into an off-heap (direct) buffer: same sequence as the heap variant,
   * using {@code ByteBuffer.allocateDirect} as the allocator.
   */
  @Test
  public void testReadRangeDirect() throws Exception {
    runReadRangeFromPositionedReadable(ByteBuffer::allocateDirect);
  }
/**
* Validate a buffer where the first byte value is {@code start}
* and the subsequent bytes are from that value incremented by one, wrapping
* at 256.
* @param message error message.
* @param buffer buffer
* @param start first byte of the buffer.
*/
private static void validateBuffer(String message, ByteBuffer buffer, int start) {
byte expected = (byte) start;
while (buffer.remaining() > 0) {
assertEquals(expected,
buffer.get(), message + " remain: " + buffer.remaining());
// increment with wrapping.
expected = (byte) (expected + 1);
}
}
  /**
   * Validate basic read vectored works as expected: three disjoint ranges,
   * each 100 bytes, far enough apart that no merging should occur.
   */
  @Test
  public void testReadVectored() throws Exception {
    // second and third ranges carry reference objects ("this"/"that")
    List<FileRange> input = asList(createFileRange(0, 100),
        createFileRange(100_000, 100, "this"),
        createFileRange(200_000, 100, "that"));
    runAndValidateVectoredRead(input);
  }
/**
* Verify a read with length 0 completes with a buffer of size 0.
*/
@Test
public void testReadVectoredZeroBytes() throws Exception {
List<FileRange> input = asList(createFileRange(0, 0, "1"),
createFileRange(100_000, 100, "2"),
createFileRange(200_000, 0, "3"));
runAndValidateVectoredRead(input);
// look up by name and validate.
final FileRange r1 = retrieve(input, "1");
assertThat(r1.getData().get().limit())
.describedAs("Data limit of %s", r1)
.isEqualTo(0);
}
/**
* Retrieve a range from a list of ranges by its (string) reference.
* @param input input list
* @param key key to look up
* @return the range
* @throws IllegalArgumentException if the range is not found.
*/
private static FileRange retrieve(List<FileRange> input, String key) {
return input.stream()
.filter(r -> key.equals(r.getReference()))
.findFirst()
.orElseThrow(() -> new IllegalArgumentException("No range with key " + key));
}
/**
* Mock run a vectored read and validate the results with the
* <ol>
* <li> {@code ByteBufferPositionedReadable.readFully()} is invoked once per range.</li>
* <li> The buffers are filled with data</li>
* </ol>
* @param input input ranges
* @throws Exception failure
*/
private void runAndValidateVectoredRead(List<FileRange> input)
throws Exception {
final Stream stream = mockStreamWithReadFully();
// should not merge the ranges
readVectored(stream, input, ByteBuffer::allocate);
// readFully is invoked once per range
Mockito.verify(stream, Mockito.times(input.size()))
.readFully(ArgumentMatchers.anyLong(), ArgumentMatchers.any(ByteBuffer.class));
// validate each buffer
for (int b = 0; b < input.size(); ++b) {
validateBuffer("buffer " + b, input.get(b).getData().get(), 0);
}
}
/**
* Mock a stream with {@link Stream#readFully(long, ByteBuffer)}.
* Filling in each byte buffer.
* @return the stream
* @throws IOException (side effect of the mocking;
*/
private static Stream mockStreamWithReadFully() throws IOException {
Stream stream = Mockito.mock(Stream.class);
Mockito.doAnswer(invocation -> {
fillBuffer(invocation.getArgument(1));
return null;
}).when(stream).readFully(ArgumentMatchers.anyLong(),
ArgumentMatchers.any(ByteBuffer.class));
return stream;
}
/**
* Empty ranges are allowed.
*/
@Test
public void testEmptyRangesAllowed() throws Throwable {
validateAndSortRanges(Collections.emptyList(), Optional.empty());
}
/**
* Reject negative offsets.
*/
@Test
public void testNegativeOffsetRaisesEOF() throws Throwable {
intercept(EOFException.class, () ->
validateAndSortRanges(asList(
createFileRange(1000, 100),
createFileRange(-1000, 100)),
Optional.empty()));
}
/**
* Reject negative lengths.
*/
@Test
public void testNegativePositionRaisesIllegalArgument() throws Throwable {
intercept(IllegalArgumentException.class, () ->
validateAndSortRanges(asList(
createFileRange(1000, 100),
createFileRange(1000, -100)),
Optional.empty()));
}
/**
* A read for a whole file is valid.
*/
@Test
public void testReadWholeFile() throws Exception {
final int length = 1000;
// Read whole file as one element
final List<? extends FileRange> ranges = validateAndSortRanges(
asList(createFileRange(0, length)),
Optional.of((long) length));
assertIsSingleRange(ranges, 0, length);
}
/**
* A read from start of file to past EOF is rejected.
*/
@Test
public void testReadPastEOFRejected() throws Exception {
final int length = 1000;
intercept(EOFException.class, () ->
validateAndSortRanges(
asList(createFileRange(0, length + 1)),
Optional.of((long) length)));
}
/**
* If the start offset is at the end of the file: an EOFException.
*/
@Test
public void testReadStartingPastEOFRejected() throws Exception {
final int length = 1000;
intercept(EOFException.class, () ->
validateAndSortRanges(
asList(createFileRange(length, 0)),
Optional.of((long) length)));
}
/**
* A read from just below the EOF to the end of the file is valid.
*/
@Test
public void testReadUpToEOF() throws Exception {
final int length = 1000;
final int p = length - 1;
assertIsSingleRange(
validateAndSortRanges(
asList(createFileRange(p, 1)),
Optional.of((long) length)),
p, 1);
}
/**
* A read from just below the EOF to the just past the end of the file is rejected
* with EOFException.
*/
@Test
public void testReadOverEOFRejected() throws Exception {
final long length = 1000;
intercept(EOFException.class, () ->
validateAndSortRanges(
asList(createFileRange(length - 1, 2)),
Optional.of(length)));
}
@Test
public void testVectorIOBufferPool() throws Throwable {
ElasticByteBufferPool elasticByteBufferPool = new ElasticByteBufferPool();
// inlined lambda to assert the pool size
Consumer<Integer> assertPoolSizeEquals = (size) -> {
assertThat(elasticByteBufferPool.size(false))
.describedAs("Pool size")
.isEqualTo(size);
};
// build vector pool from the buffer pool operations converted to
// allocate and release lambda expressions
ByteBufferPool vectorBuffers = new VectorIOBufferPool(
r -> elasticByteBufferPool.getBuffer(false, r),
elasticByteBufferPool::putBuffer);
assertPoolSizeEquals.accept(0);
final ByteBuffer b1 = vectorBuffers.getBuffer(false, 100);
final ByteBuffer b2 = vectorBuffers.getBuffer(false, 50);
// return the first buffer for a pool size of 1
vectorBuffers.putBuffer(b1);
assertPoolSizeEquals.accept(1);
// expect the returned buffer back
ByteBuffer b3 = vectorBuffers.getBuffer(true, 100);
assertThat(b3)
.describedAs("buffer returned from a get after a previous one was returned")
.isSameAs(b1);
assertPoolSizeEquals.accept(0);
// return them all
vectorBuffers.putBuffer(b2);
vectorBuffers.putBuffer(b3);
assertPoolSizeEquals.accept(2);
// release does not propagate
vectorBuffers.release();
assertPoolSizeEquals.accept(2);
elasticByteBufferPool.release();
}
}
|
Stream
|
java
|
apache__flink
|
flink-filesystems/flink-s3-fs-base/src/main/java/org/apache/flink/fs/s3/common/writer/RecoverableMultiPartUploadImpl.java
|
{
"start": 1884,
"end": 2032
}
|
class ____ NOT thread safe and relies on external synchronization.
*
* <p><b>Note:</b> If any of the methods to add parts throws an exception, this
|
is
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GooglePubsubLiteEndpointBuilderFactory.java
|
{
"start": 1468,
"end": 1623
}
|
interface ____ {
/**
* Builder for endpoint consumers for the Google PubSub Lite component.
*/
public
|
GooglePubsubLiteEndpointBuilderFactory
|
java
|
bumptech__glide
|
instrumentation/src/androidTest/java/com/bumptech/glide/load/resource/bitmap/DownsamplerEmulatorTest.java
|
{
"start": 32383,
"end": 33762
}
|
class ____ {
private int sourceWidth;
private int sourceHeight;
private int targetWidth;
private int targetHeight;
private boolean hasGainmap;
private boolean allowHardwareConfig;
@Nullable private Api[] apis;
public Builder setSourceWidth(int sourceWidth) {
this.sourceWidth = sourceWidth;
return this;
}
public Builder setSourceHeight(int sourceHeight) {
this.sourceHeight = sourceHeight;
return this;
}
public Builder setTargetWidth(int targetWidth) {
this.targetWidth = targetWidth;
return this;
}
public Builder setTargetHeight(int targetHeight) {
this.targetHeight = targetHeight;
return this;
}
public Builder setHasGainmap(boolean hasGainmap) {
this.hasGainmap = hasGainmap;
return this;
}
public Builder setAllowHardwareConfig(boolean allowHardwareConfig) {
this.allowHardwareConfig = allowHardwareConfig;
return this;
}
public Builder setApis(Api[] apis) {
this.apis = apis;
return this;
}
public TestCase build() {
Preconditions.checkNotNull(apis);
return new TestCase(this);
}
}
}
}
static final
|
Builder
|
java
|
alibaba__fastjson
|
src/main/java/com/alibaba/fastjson/asm/ClassWriter.java
|
{
"start": 11022,
"end": 11959
}
|
class ____ build. Does nothing if the constant pool already
* contains a similar item.
*
* @param owner the internal name of the field's owner class.
* @param name the field's name.
* @param desc the field's descriptor.
* @return a new or already existing field reference item.
*/
Item newFieldItem(final String owner, final String name, final String desc) {
key3.set(9 /* FIELD */, owner, name, desc);
Item result = get(key3);
if (result == null) {
// put122(9 /* FIELD */, newClassItem(owner).index, newNameTypeItem(name, desc).index);
int s1 = newClassItem(owner).index, s2 = newNameTypeItem(name, desc).index;
pool.put12(9 /* FIELD */, s1).putShort(s2);
result = new Item(index++, key3);
put(result);
}
return result;
}
/**
* Adds a method reference to the constant pool of the
|
being
|
java
|
quarkusio__quarkus
|
extensions/mailer/runtime/src/main/java/io/quarkus/mailer/runtime/BlockingMailerImpl.java
|
{
"start": 296,
"end": 928
}
|
class ____ implements Mailer {
private final ReactiveMailer mailer;
private final Duration timeout;
BlockingMailerImpl(ReactiveMailer mailer, Duration timeout) {
this.mailer = mailer;
this.timeout = timeout;
}
@Override
public void send(Mail... mails) {
if (timeout == null || timeout.isZero()) {
// Backward compatibility: if timeout is 0 or null, wait indefinitely
mailer.send(mails).await().indefinitely();
} else {
// Use the configured timeout
mailer.send(mails).await().atMost(timeout);
}
}
}
|
BlockingMailerImpl
|
java
|
elastic__elasticsearch
|
x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpRequestTests.java
|
{
"start": 1208,
"end": 7835
}
|
class ____ extends ESTestCase {
public void testParsingFromUrl() throws Exception {
HttpRequest.Builder builder = HttpRequest.builder("www.example.org", 1234);
builder.path("/foo/bar/org");
builder.setParam("param", "test");
builder.scheme(Scheme.HTTPS);
assertThatManualBuilderEqualsParsingFromUrl("https://www.example.org:1234/foo/bar/org?param=test", builder);
// test without specifying port
builder = HttpRequest.builder("www.example.org", 80);
assertThatManualBuilderEqualsParsingFromUrl("http://www.example.org", builder);
// encoded values
builder = HttpRequest.builder("www.example.org", 80).setParam("foo", " white space");
assertThatManualBuilderEqualsParsingFromUrl("http://www.example.org?foo=%20white%20space", builder);
}
public void testParsingEmptyUrl() throws Exception {
try {
HttpRequest.builder().fromUrl("");
fail("Expected exception due to empty URL");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), containsString("Configured URL is empty, please configure a valid URL"));
}
}
public void testInvalidUrlsWithMissingScheme() throws Exception {
try {
HttpRequest.builder().fromUrl("www.test.de");
fail("Expected exception due to missing scheme");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), containsString("URL [www.test.de] does not contain a scheme"));
}
}
public void testInvalidUrlsWithHost() throws Exception {
try {
HttpRequest.builder().fromUrl("https://");
fail("Expected exception due to missing host");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), containsString("Malformed URL [https://]"));
}
}
public void testXContentSerialization() throws Exception {
final HttpRequest.Builder builder;
if (randomBoolean()) {
builder = HttpRequest.builder();
builder.fromUrl("http://localhost:9200/generic/createevent");
} else {
builder = HttpRequest.builder("localhost", 9200);
if (randomBoolean()) {
builder.scheme(randomFrom(Scheme.values()));
if (usually()) {
builder.path(randomAlphaOfLength(50));
}
}
}
if (usually()) {
builder.method(randomFrom(HttpMethod.values()));
}
if (randomBoolean()) {
builder.setParam(randomAlphaOfLength(10), randomAlphaOfLength(10));
if (randomBoolean()) {
builder.setParam(randomAlphaOfLength(10), randomAlphaOfLength(10));
}
}
if (randomBoolean()) {
builder.setHeader(randomAlphaOfLength(10), randomAlphaOfLength(10));
if (randomBoolean()) {
builder.setHeader(randomAlphaOfLength(10), randomAlphaOfLength(10));
}
}
if (randomBoolean()) {
builder.auth(new BasicAuth(randomAlphaOfLength(10), randomAlphaOfLength(20).toCharArray()));
}
if (randomBoolean()) {
builder.body(randomAlphaOfLength(200));
}
if (randomBoolean()) {
builder.connectionTimeout(randomTimeout());
}
if (randomBoolean()) {
builder.readTimeout(randomTimeout());
}
if (randomBoolean()) {
builder.proxy(new HttpProxy(randomAlphaOfLength(10), randomIntBetween(1024, 65000)));
}
final HttpRequest httpRequest = builder.build();
assertNotNull(httpRequest);
try (XContentBuilder xContentBuilder = randomFrom(jsonBuilder(), smileBuilder(), yamlBuilder(), cborBuilder())) {
httpRequest.toXContent(xContentBuilder, WatcherParams.builder().hideSecrets(false).build());
try (XContentParser parser = createParser(xContentBuilder)) {
assertNull(parser.currentToken());
parser.nextToken();
HttpRequest parsedRequest = HttpRequest.Parser.parse(parser);
assertEquals(httpRequest, parsedRequest);
}
}
}
private static TimeValue randomTimeout() {
// micros and nanos don't round trip will full precision so exclude them from the test
return randomTimeValue(0, 1000, TimeUnit.DAYS, TimeUnit.HOURS, TimeUnit.MINUTES, TimeUnit.SECONDS, TimeUnit.MILLISECONDS);
}
public void testXContentRemovesAuthorization() throws Exception {
HttpRequest request = HttpRequest.builder("localhost", 443).setHeader("Authorization", "Bearer Foo").build();
try (XContentBuilder builder = jsonBuilder()) {
WatcherParams params = WatcherParams.builder().hideSecrets(false).build();
request.toXContent(builder, params);
assertThat(Strings.toString(builder), containsString("Bearer Foo"));
}
try (XContentBuilder builder = jsonBuilder()) {
request.toXContent(builder, WatcherParams.HIDE_SECRETS);
assertThat(Strings.toString(builder), not(containsString("Bearer Foo")));
assertThat(Strings.toString(builder), containsString(WatcherXContentParser.REDACTED_PASSWORD));
}
}
public void testToStringDoesNotContainAuthorizationheader() {
HttpRequest request = HttpRequest.builder("localhost", 443).setHeader("Authorization", "Bearer Foo").build();
assertThat(request.toString(), not(containsString("Bearer Foo")));
assertThat(request.toString(), containsString("Authorization: " + WatcherXContentParser.REDACTED_PASSWORD));
}
private void assertThatManualBuilderEqualsParsingFromUrl(String url, HttpRequest.Builder builder) throws Exception {
XContentBuilder urlContentBuilder = jsonBuilder().startObject().field("url", url).endObject();
XContentParser urlContentParser = createParser(urlContentBuilder);
urlContentParser.nextToken();
HttpRequest urlParsedRequest = HttpRequest.Parser.parse(urlContentParser);
WatcherParams params = WatcherParams.builder().hideSecrets(false).build();
XContentBuilder xContentBuilder = builder.build().toXContent(jsonBuilder(), params);
XContentParser xContentParser = createParser(xContentBuilder);
xContentParser.nextToken();
HttpRequest parsedRequest = HttpRequest.Parser.parse(xContentParser);
assertThat(parsedRequest, is(urlParsedRequest));
}
}
|
HttpRequestTests
|
java
|
micronaut-projects__micronaut-core
|
management/src/main/java/io/micronaut/management/endpoint/info/InfoSource.java
|
{
"start": 924,
"end": 1145
}
|
interface ____ extends Ordered {
/**
* @return A publisher that returns a {@link PropertySource} containing data to be added to the endpoint response.
*/
Publisher<PropertySource> getSource();
}
|
InfoSource
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_1200/Issue1222.java
|
{
"start": 577,
"end": 642
}
|
class ____ {
public Type type;
}
public static
|
Model
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/search/rank/FieldBasedRerankerIT.java
|
{
"start": 16109,
"end": 17351
}
|
class ____ extends Plugin implements SearchPlugin {
private static final String FIELD_BASED_RANK_BUILDER_NAME = "field_based_rank";
private static final String THROWING_RANK_BUILDER_NAME = "throwing_rank";
@Override
public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
return List.of(
new NamedWriteableRegistry.Entry(RankBuilder.class, FIELD_BASED_RANK_BUILDER_NAME, FieldBasedRankBuilder::new),
new NamedWriteableRegistry.Entry(RankBuilder.class, THROWING_RANK_BUILDER_NAME, ThrowingRankBuilder::new)
);
}
@Override
public List<NamedXContentRegistry.Entry> getNamedXContent() {
return List.of(
new NamedXContentRegistry.Entry(
RankBuilder.class,
new ParseField(FIELD_BASED_RANK_BUILDER_NAME),
FieldBasedRankBuilder::fromXContent
),
new NamedXContentRegistry.Entry(
RankBuilder.class,
new ParseField(THROWING_RANK_BUILDER_NAME),
ThrowingRankBuilder::fromXContent
)
);
}
}
}
|
FieldBasedRerankerPlugin
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
|
{
"start": 3373,
"end": 28441
}
|
class ____ {
public static final Logger LOG =
LoggerFactory.getLogger(AliyunOSSFileSystemStore.class);
private String username;
private FileSystem.Statistics statistics;
private OSSClient ossClient;
private String bucketName;
private long uploadPartSize;
private int maxKeys;
private String serverSideEncryptionAlgorithm;
private boolean useListV1;
public void initialize(URI uri, Configuration conf, String user,
FileSystem.Statistics stat) throws IOException {
this.username = user;
statistics = stat;
ClientConfiguration clientConf = new ClientConfiguration();
clientConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS_KEY,
MAXIMUM_CONNECTIONS_DEFAULT));
boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS_KEY,
SECURE_CONNECTIONS_DEFAULT);
clientConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
clientConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES_KEY,
MAX_ERROR_RETRIES_DEFAULT));
clientConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT_KEY,
ESTABLISH_TIMEOUT_DEFAULT));
clientConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT_KEY,
SOCKET_TIMEOUT_DEFAULT));
clientConf.setUserAgent(
conf.get(USER_AGENT_PREFIX, USER_AGENT_PREFIX_DEFAULT) + ", Hadoop/"
+ VersionInfo.getVersion());
String region = conf.get(REGION_KEY, "");
String signatureVersion = conf.get(SIGNATURE_VERSION_KEY, SIGNATURE_VERSION_DEFAULT);
if ("V4".equalsIgnoreCase(signatureVersion)) {
clientConf.setSignatureVersion(SignVersion.V4);
if (StringUtils.isEmpty(region)) {
LOG.error("Signature version is V4 ,but region is empty.");
throw new IOException("SignVersion is V4 but region is empty");
}
}
String proxyHost = conf.getTrimmed(PROXY_HOST_KEY, "");
int proxyPort = conf.getInt(PROXY_PORT_KEY, -1);
if (StringUtils.isNotEmpty(proxyHost)) {
clientConf.setProxyHost(proxyHost);
if (proxyPort >= 0) {
clientConf.setProxyPort(proxyPort);
} else {
if (secureConnections) {
LOG.warn("Proxy host set without port. Using HTTPS default 443");
clientConf.setProxyPort(443);
} else {
LOG.warn("Proxy host set without port. Using HTTP default 80");
clientConf.setProxyPort(80);
}
}
String proxyUsername = conf.getTrimmed(PROXY_USERNAME_KEY);
String proxyPassword = conf.getTrimmed(PROXY_PASSWORD_KEY);
if ((proxyUsername == null) != (proxyPassword == null)) {
String msg = "Proxy error: " + PROXY_USERNAME_KEY + " or " +
PROXY_PASSWORD_KEY + " set without the other.";
LOG.error(msg);
throw new IllegalArgumentException(msg);
}
clientConf.setProxyUsername(proxyUsername);
clientConf.setProxyPassword(proxyPassword);
clientConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN_KEY));
clientConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION_KEY));
} else if (proxyPort >= 0) {
String msg = "Proxy error: " + PROXY_PORT_KEY + " set without " +
PROXY_HOST_KEY;
LOG.error(msg);
throw new IllegalArgumentException(msg);
}
String endPoint = conf.getTrimmed(ENDPOINT_KEY, "");
if (StringUtils.isEmpty(endPoint)) {
throw new IllegalArgumentException("Aliyun OSS endpoint should not be " +
"null or empty. Please set proper endpoint with 'fs.oss.endpoint'.");
}
CredentialsProvider provider =
AliyunOSSUtils.getCredentialsProvider(uri, conf);
ossClient = new OSSClient(endPoint, provider, clientConf);
uploadPartSize = AliyunOSSUtils.getMultipartSizeProperty(conf,
MULTIPART_UPLOAD_PART_SIZE_KEY, MULTIPART_UPLOAD_PART_SIZE_DEFAULT);
serverSideEncryptionAlgorithm =
conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM_KEY, "");
bucketName = uri.getHost();
String cannedACLName = conf.get(CANNED_ACL_KEY, CANNED_ACL_DEFAULT);
if (StringUtils.isNotEmpty(cannedACLName)) {
CannedAccessControlList cannedACL =
CannedAccessControlList.valueOf(cannedACLName);
ossClient.setBucketAcl(bucketName, cannedACL);
statistics.incrementWriteOps(1);
}
if (StringUtils.isNotEmpty(region)) {
ossClient.setRegion(region);
LOG.debug("ossClient setRegion {}", region);
}
maxKeys = conf.getInt(MAX_PAGING_KEYS_KEY, MAX_PAGING_KEYS_DEFAULT);
int listVersion = conf.getInt(LIST_VERSION, DEFAULT_LIST_VERSION);
if (listVersion < 1 || listVersion > 2) {
LOG.warn("Configured fs.oss.list.version {} is invalid, forcing " +
"version 2", listVersion);
}
useListV1 = (listVersion == 1);
}
/**
* Delete an object, and update write operation statistics.
*
* @param key key to blob to delete.
*/
public void deleteObject(String key) {
ossClient.deleteObject(bucketName, key);
statistics.incrementWriteOps(1);
}
/**
* Delete a list of keys, and update write operation statistics.
*
* @param keysToDelete collection of keys to delete.
* @throws IOException if failed to delete objects.
*/
public void deleteObjects(List<String> keysToDelete) throws IOException {
if (CollectionUtils.isEmpty(keysToDelete)) {
LOG.warn("Keys to delete is empty.");
return;
}
int retry = 10;
int tries = 0;
while (CollectionUtils.isNotEmpty(keysToDelete)) {
DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucketName);
deleteRequest.setKeys(keysToDelete);
// There are two modes to do batch delete:
// 1. verbose mode: A list of all deleted objects is returned.
// 2. quiet mode: No message body is returned.
// Here, we choose the verbose mode to do batch delete.
deleteRequest.setQuiet(false);
DeleteObjectsResult result = ossClient.deleteObjects(deleteRequest);
statistics.incrementWriteOps(1);
final List<String> deletedObjects = result.getDeletedObjects();
keysToDelete = keysToDelete.stream().filter(item -> !deletedObjects.contains(item))
.collect(Collectors.toList());
tries++;
if (tries == retry) {
break;
}
}
if (tries == retry && CollectionUtils.isNotEmpty(keysToDelete)) {
// Most of time, it is impossible to try 10 times, expect the
// Aliyun OSS service problems.
throw new IOException("Failed to delete Aliyun OSS objects for " + tries + " times.");
}
}
/**
* Delete a directory from Aliyun OSS.
*
* @param key directory key to delete.
* @throws IOException if failed to delete directory.
*/
public void deleteDirs(String key) throws IOException {
OSSListRequest listRequest = createListObjectsRequest(key,
maxKeys, null, null, true);
while (true) {
OSSListResult objects = listObjects(listRequest);
statistics.incrementReadOps(1);
List<String> keysToDelete = new ArrayList<String>();
for (OSSObjectSummary objectSummary : objects.getObjectSummaries()) {
keysToDelete.add(objectSummary.getKey());
}
deleteObjects(keysToDelete);
if (objects.isTruncated()) {
if (objects.isV1()) {
listRequest.getV1().setMarker(objects.getV1().getNextMarker());
} else {
listRequest.getV2().setContinuationToken(
objects.getV2().getNextContinuationToken());
}
} else {
break;
}
}
}
/**
* Return metadata of a given object key.
*
* @param key object key.
* @return return null if key does not exist.
*/
public ObjectMetadata getObjectMetadata(String key) {
try {
GenericRequest request = new GenericRequest(bucketName, key);
request.setLogEnabled(false);
ObjectMetadata objectMeta = ossClient.getObjectMetadata(request);
statistics.incrementReadOps(1);
return objectMeta;
} catch (OSSException osse) {
LOG.debug("Exception thrown when get object meta: "
+ key + ", exception: " + osse);
return null;
}
}
/**
* Upload an empty file as an OSS object, using single upload.
*
* @param key object key.
* @throws IOException if failed to upload object.
*/
public void storeEmptyFile(String key) throws IOException {
ObjectMetadata dirMeta = new ObjectMetadata();
byte[] buffer = new byte[0];
ByteArrayInputStream in = new ByteArrayInputStream(buffer);
dirMeta.setContentLength(0);
try {
ossClient.putObject(bucketName, key, in, dirMeta);
statistics.incrementWriteOps(1);
} finally {
in.close();
}
}
/**
* Copy an object from source key to destination key.
*
* @param srcKey source key.
* @param srcLen source file length.
* @param dstKey destination key.
* @return true if file is successfully copied.
*/
public boolean copyFile(String srcKey, long srcLen, String dstKey) {
try {
//1, try single copy first
return singleCopy(srcKey, dstKey);
} catch (Exception e) {
//2, if failed(shallow copy not supported), then multi part copy
LOG.debug("Exception thrown when copy file: " + srcKey
+ ", exception: " + e + ", use multipartCopy instead");
return multipartCopy(srcKey, srcLen, dstKey);
}
}
/**
* Use single copy to copy an OSS object.
* (The caller should make sure srcPath is a file and dstPath is valid)
*
* @param srcKey source key.
* @param dstKey destination key.
* @return true if object is successfully copied.
*/
private boolean singleCopy(String srcKey, String dstKey) {
CopyObjectResult copyResult =
ossClient.copyObject(bucketName, srcKey, bucketName, dstKey);
statistics.incrementWriteOps(1);
LOG.debug(copyResult.getETag());
return true;
}
/**
* Use multipart copy to copy an OSS object.
* (The caller should make sure srcPath is a file and dstPath is valid)
*
* @param srcKey source key.
* @param contentLength data size of the object to copy.
* @param dstKey destination key.
* @return true if success, or false if upload is aborted.
*/
private boolean multipartCopy(String srcKey, long contentLength,
String dstKey) {
long realPartSize =
AliyunOSSUtils.calculatePartSize(contentLength, uploadPartSize);
int partNum = (int) (contentLength / realPartSize);
if (contentLength % realPartSize != 0) {
partNum++;
}
InitiateMultipartUploadRequest initiateMultipartUploadRequest =
new InitiateMultipartUploadRequest(bucketName, dstKey);
ObjectMetadata meta = new ObjectMetadata();
if (StringUtils.isNotEmpty(serverSideEncryptionAlgorithm)) {
meta.setServerSideEncryption(serverSideEncryptionAlgorithm);
}
initiateMultipartUploadRequest.setObjectMetadata(meta);
InitiateMultipartUploadResult initiateMultipartUploadResult =
ossClient.initiateMultipartUpload(initiateMultipartUploadRequest);
String uploadId = initiateMultipartUploadResult.getUploadId();
List<PartETag> partETags = new ArrayList<PartETag>();
try {
for (int i = 0; i < partNum; i++) {
long skipBytes = realPartSize * i;
long size = (realPartSize < contentLength - skipBytes) ?
realPartSize : contentLength - skipBytes;
UploadPartCopyRequest partCopyRequest = new UploadPartCopyRequest();
partCopyRequest.setSourceBucketName(bucketName);
partCopyRequest.setSourceKey(srcKey);
partCopyRequest.setBucketName(bucketName);
partCopyRequest.setKey(dstKey);
partCopyRequest.setUploadId(uploadId);
partCopyRequest.setPartSize(size);
partCopyRequest.setBeginIndex(skipBytes);
partCopyRequest.setPartNumber(i + 1);
UploadPartCopyResult partCopyResult =
ossClient.uploadPartCopy(partCopyRequest);
statistics.incrementWriteOps(1);
statistics.incrementBytesWritten(size);
partETags.add(partCopyResult.getPartETag());
}
CompleteMultipartUploadRequest completeMultipartUploadRequest =
new CompleteMultipartUploadRequest(bucketName, dstKey,
uploadId, partETags);
CompleteMultipartUploadResult completeMultipartUploadResult =
ossClient.completeMultipartUpload(completeMultipartUploadRequest);
LOG.debug(completeMultipartUploadResult.getETag());
return true;
} catch (OSSException | ClientException e) {
AbortMultipartUploadRequest abortMultipartUploadRequest =
new AbortMultipartUploadRequest(bucketName, dstKey, uploadId);
ossClient.abortMultipartUpload(abortMultipartUploadRequest);
return false;
}
}
/**
* Upload a file as an OSS object, using single upload.
*
* @param key object key.
* @param file local file to upload.
* @throws IOException if failed to upload object.
*/
public void uploadObject(String key, File file) throws IOException {
File object = file.getAbsoluteFile();
FileInputStream fis = new FileInputStream(object);
ObjectMetadata meta = new ObjectMetadata();
meta.setContentLength(object.length());
if (StringUtils.isNotEmpty(serverSideEncryptionAlgorithm)) {
meta.setServerSideEncryption(serverSideEncryptionAlgorithm);
}
try {
PutObjectResult result = ossClient.putObject(bucketName, key, fis, meta);
LOG.debug(result.getETag());
statistics.incrementWriteOps(1);
} finally {
fis.close();
}
}
/**
* Upload an input stream as an OSS object, using single upload.
* @param key object key.
* @param in input stream to upload.
* @param size size of the input stream.
* @throws IOException if failed to upload object.
*/
public void uploadObject(String key, InputStream in, long size)
throws IOException {
ObjectMetadata meta = new ObjectMetadata();
meta.setContentLength(size);
if (StringUtils.isNotEmpty(serverSideEncryptionAlgorithm)) {
meta.setServerSideEncryption(serverSideEncryptionAlgorithm);
}
PutObjectResult result = ossClient.putObject(bucketName, key, in, meta);
LOG.debug(result.getETag());
statistics.incrementWriteOps(1);
}
/**
* list objects.
*
* @param listRequest list request.
* @return a list of matches.
*/
public OSSListResult listObjects(OSSListRequest listRequest) {
OSSListResult listResult;
if (listRequest.isV1()) {
listResult = OSSListResult.v1(
ossClient.listObjects(listRequest.getV1()));
} else {
listResult = OSSListResult.v2(
ossClient.listObjectsV2(listRequest.getV2()));
}
statistics.incrementReadOps(1);
return listResult;
}
/**
* continue to list objects depends on previous list result.
*
* @param listRequest list request.
* @param preListResult previous list result.
* @return a list of matches.
*/
public OSSListResult continueListObjects(OSSListRequest listRequest,
OSSListResult preListResult) {
OSSListResult listResult;
if (listRequest.isV1()) {
listRequest.getV1().setMarker(preListResult.getV1().getNextMarker());
listResult = OSSListResult.v1(
ossClient.listObjects(listRequest.getV1()));
} else {
listRequest.getV2().setContinuationToken(
preListResult.getV2().getNextContinuationToken());
listResult = OSSListResult.v2(
ossClient.listObjectsV2(listRequest.getV2()));
}
statistics.incrementReadOps(1);
return listResult;
}
/**
* create list objects request.
*
* @param prefix prefix.
* @param maxListingLength max no. of entries
* @param marker last key in any previous search.
* @param continuationToken list from a specific point.
* @param recursive whether to list directory recursively.
* @return a list of matches.
*/
protected OSSListRequest createListObjectsRequest(String prefix,
int maxListingLength, String marker,
String continuationToken, boolean recursive) {
String delimiter = recursive ? null : "/";
prefix = AliyunOSSUtils.maybeAddTrailingSlash(prefix);
if (useListV1) {
ListObjectsRequest listRequest = new ListObjectsRequest(bucketName);
listRequest.setPrefix(prefix);
listRequest.setDelimiter(delimiter);
listRequest.setMaxKeys(maxListingLength);
listRequest.setMarker(marker);
return OSSListRequest.v1(listRequest);
} else {
ListObjectsV2Request listV2Request = new ListObjectsV2Request(bucketName);
listV2Request.setPrefix(prefix);
listV2Request.setDelimiter(delimiter);
listV2Request.setMaxKeys(maxListingLength);
listV2Request.setContinuationToken(continuationToken);
return OSSListRequest.v2(listV2Request);
}
}
/**
* Retrieve a part of an object.
*
* @param key the object name that is being retrieved from the Aliyun OSS.
* @param byteStart start position.
* @param byteEnd end position.
* @return This method returns null if the key is not found.
*/
public InputStream retrieve(String key, long byteStart, long byteEnd) {
try {
GetObjectRequest request = new GetObjectRequest(bucketName, key);
request.setRange(byteStart, byteEnd);
InputStream in = ossClient.getObject(request).getObjectContent();
statistics.incrementReadOps(1);
return in;
} catch (OSSException | ClientException e) {
LOG.error("Exception thrown when store retrieves key: "
+ key + ", exception: " + e);
return null;
}
}
/**
* Close OSS client properly.
*/
public void close() {
if (ossClient != null) {
ossClient.shutdown();
ossClient = null;
}
}
/**
* Clean up all objects matching the prefix.
*
* @param prefix Aliyun OSS object prefix.
* @throws IOException if failed to clean up objects.
*/
public void purge(String prefix) throws IOException {
deleteDirs(prefix);
}
public RemoteIterator<LocatedFileStatus> singleStatusRemoteIterator(
final FileStatus fileStatus, final BlockLocation[] locations) {
return new RemoteIterator<LocatedFileStatus>() {
private boolean hasNext = true;
@Override
public boolean hasNext() throws IOException {
return fileStatus != null && hasNext;
}
@Override
public LocatedFileStatus next() throws IOException {
if (hasNext()) {
LocatedFileStatus s = new LocatedFileStatus(fileStatus,
fileStatus.isFile() ? locations : null);
hasNext = false;
return s;
} else {
throw new NoSuchElementException();
}
}
};
}
public RemoteIterator<LocatedFileStatus> createLocatedFileStatusIterator(
final String prefix, final int maxListingLength, FileSystem fs,
PathFilter filter, FileStatusAcceptor acceptor, boolean recursive) {
return new RemoteIterator<LocatedFileStatus>() {
private boolean firstListing = true;
private boolean meetEnd = false;
private ListIterator<FileStatus> batchIterator;
private OSSListRequest listRequest = null;
@Override
public boolean hasNext() throws IOException {
if (firstListing) {
requestNextBatch();
firstListing = false;
}
return batchIterator.hasNext() || requestNextBatch();
}
@Override
public LocatedFileStatus next() throws IOException {
if (hasNext()) {
FileStatus status = batchIterator.next();
BlockLocation[] locations = fs.getFileBlockLocations(status,
0, status.getLen());
return new LocatedFileStatus(
status, status.isFile() ? locations : null);
} else {
throw new NoSuchElementException();
}
}
private boolean requestNextBatch() {
while (!meetEnd) {
if (continueListStatus()) {
return true;
}
}
return false;
}
private boolean continueListStatus() {
if (meetEnd) {
return false;
}
if (listRequest == null) {
listRequest = createListObjectsRequest(prefix,
maxListingLength, null, null, recursive);
}
OSSListResult listing = listObjects(listRequest);
List<FileStatus> stats = new ArrayList<>(
listing.getObjectSummaries().size() +
listing.getCommonPrefixes().size());
for (OSSObjectSummary summary : listing.getObjectSummaries()) {
String key = summary.getKey();
Path path = fs.makeQualified(new Path("/" + key));
if (filter.accept(path) && acceptor.accept(path, summary)) {
FileStatus status = new OSSFileStatus(summary.getSize(),
key.endsWith("/"), 1, fs.getDefaultBlockSize(path),
summary.getLastModified().getTime(), path, username);
stats.add(status);
}
}
for (String commonPrefix : listing.getCommonPrefixes()) {
Path path = fs.makeQualified(new Path("/" + commonPrefix));
if (filter.accept(path) && acceptor.accept(path, commonPrefix)) {
FileStatus status = new OSSFileStatus(0, true, 1, 0, 0,
path, username);
stats.add(status);
}
}
batchIterator = stats.listIterator();
if (listing.isTruncated()) {
if (listing.isV1()) {
listRequest.getV1().setMarker(listing.getV1().getNextMarker());
} else {
listRequest.getV2().setContinuationToken(
listing.getV2().getNextContinuationToken());
}
} else {
meetEnd = true;
}
statistics.incrementReadOps(1);
return batchIterator.hasNext();
}
};
}
public PartETag uploadPart(OSSDataBlocks.BlockUploadData partData,
long size, String key, String uploadId, int idx) throws IOException {
if (partData.hasFile()) {
return uploadPart(partData.getFile(), key, uploadId, idx);
} else {
return uploadPart(partData.getUploadStream(), size, key, uploadId, idx);
}
}
public PartETag uploadPart(File file, String key, String uploadId, int idx)
throws IOException {
InputStream in = new FileInputStream(file);
try {
return uploadPart(in, file.length(), key, uploadId, idx);
} finally {
in.close();
}
}
public PartETag uploadPart(InputStream in, long size, String key,
String uploadId, int idx) throws IOException {
Exception caught = null;
int tries = 3;
while (tries > 0) {
try {
UploadPartRequest uploadRequest = new UploadPartRequest();
uploadRequest.setBucketName(bucketName);
uploadRequest.setKey(key);
uploadRequest.setUploadId(uploadId);
uploadRequest.setInputStream(in);
uploadRequest.setPartSize(size);
uploadRequest.setPartNumber(idx);
UploadPartResult uploadResult = ossClient.uploadPart(uploadRequest);
statistics.incrementWriteOps(1);
return uploadResult.getPartETag();
} catch (Exception e) {
LOG.debug("Failed to upload " + key + ", part " + idx +
"try again.", e);
caught = e;
}
tries--;
}
assert (caught != null);
throw new IOException("Failed to upload " + key + ", part " + idx +
" for 3 times.", caught);
}
/**
* Initiate multipart upload.
* @param key object key.
* @return upload id.
*/
public String getUploadId(String key) {
InitiateMultipartUploadRequest initiateMultipartUploadRequest =
new InitiateMultipartUploadRequest(bucketName, key);
InitiateMultipartUploadResult initiateMultipartUploadResult =
ossClient.initiateMultipartUpload(initiateMultipartUploadRequest);
return initiateMultipartUploadResult.getUploadId();
}
/**
* Complete the specific multipart upload.
* @param key object key.
* @param uploadId upload id of this multipart upload.
* @param partETags part etags need to be completed.
* @return CompleteMultipartUploadResult.
*/
public CompleteMultipartUploadResult completeMultipartUpload(String key,
String uploadId, List<PartETag> partETags) {
Collections.sort(partETags, new PartNumberAscendComparator());
CompleteMultipartUploadRequest completeMultipartUploadRequest =
new CompleteMultipartUploadRequest(bucketName, key, uploadId,
partETags);
return ossClient.completeMultipartUpload(completeMultipartUploadRequest);
}
/**
* Abort the specific multipart upload.
* @param key object key.
* @param uploadId upload id of this multipart upload.
*/
public void abortMultipartUpload(String key, String uploadId) {
AbortMultipartUploadRequest request = new AbortMultipartUploadRequest(
bucketName, key, uploadId);
ossClient.abortMultipartUpload(request);
}
private static
|
AliyunOSSFileSystemStore
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/error/DescriptionFormatter.java
|
{
"start": 885,
"end": 1793
}
|
class ____ {
private static final DescriptionFormatter INSTANCE = new DescriptionFormatter();
/**
* Returns the singleton instance of this class.
* @return the singleton instance of this class.
*/
public static DescriptionFormatter instance() {
return INSTANCE;
}
// TODO reduce the visibility of the fields annotated with @VisibleForTesting
DescriptionFormatter() {}
/**
* Formats the given <code>{@link Description}</code> by surrounding its text value with square brackets and adding a space at
* the end.
* @param d the description to format. It can be {@code null}.
* @return the formatted description, or an empty {@code String} if the {@code Description} is {@code null}.
*/
public String format(Description d) {
String s = (d != null) ? d.value() : null;
if (isNullOrEmpty(s)) return "";
return "[%s] ".formatted(s);
}
}
|
DescriptionFormatter
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/logging/TestException.java
|
{
"start": 941,
"end": 2222
}
|
class ____ {
private static final Pattern LINE_NUMBER_PATTERN = Pattern.compile("\\.java\\:\\d+\\)");
private TestException() {
}
public static Exception create() {
CreatorThread creatorThread = new CreatorThread();
creatorThread.start();
try {
creatorThread.join();
}
catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
Exception exception = creatorThread.exception;
assertThat(exception).isNotNull();
return exception;
}
private static Exception createTestException() {
Throwable root = new RuntimeException("root");
Throwable cause = createCause(root);
Exception exception = createException(cause);
exception.addSuppressed(new RuntimeException("suppressed"));
return exception;
}
private static Throwable createCause(Throwable root) {
return new RuntimeException("cause", root);
}
private static Exception createException(Throwable cause) {
return actualCreateException(cause);
}
private static Exception actualCreateException(Throwable cause) {
return new RuntimeException("exception", cause);
}
public static String withoutLineNumbers(String stackTrace) {
Matcher matcher = LINE_NUMBER_PATTERN.matcher(stackTrace);
return matcher.replaceAll(".java:NN)");
}
private static final
|
TestException
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/StreamOperator.java
|
{
"start": 2111,
"end": 7458
}
|
interface ____<OUT> extends CheckpointListener, KeyContext, Serializable {
// ------------------------------------------------------------------------
// life cycle
// ------------------------------------------------------------------------
/**
* This method is called immediately before any elements are processed, it should contain the
* operator's initialization logic.
*
* @implSpec In case of recovery, this method needs to ensure that all recovered data is
* processed before passing back control, so that the order of elements is ensured during
* the recovery of an operator chain (operators are opened from the tail operator to the
* head operator).
* @throws java.lang.Exception An exception in this method causes the operator to fail.
*/
void open() throws Exception;
/**
* This method is called at the end of data processing.
*
* <p>The method is expected to flush all remaining buffered data. Exceptions during this
* flushing of buffered data should be propagated, in order to cause the operation to be
* recognized as failed, because the last data items are not processed properly.
*
* <p><b>After this method is called, no more records can be produced for the downstream
* operators.</b>
*
* <p><b>WARNING:</b> It is not safe to use this method to commit any transactions or other side
* effects! You can use this method to flush any buffered data that can later on be committed
* e.g. in a {@link StreamOperator#notifyCheckpointComplete(long)}.
*
* <p><b>NOTE:</b>This method does not need to close any resources. You should release external
* resources in the {@link #close()} method.
*
* @throws java.lang.Exception An exception in this method causes the operator to fail.
*/
void finish() throws Exception;
/**
* This method is called at the very end of the operator's life, both in the case of a
* successful completion of the operation, and in the case of a failure and canceling.
*
* <p>This method is expected to make a thorough effort to release all resources that the
* operator has acquired.
*
* <p><b>NOTE:</b>It can not emit any records! If you need to emit records at the end of
* processing, do so in the {@link #finish()} method.
*/
void close() throws Exception;
// ------------------------------------------------------------------------
// state snapshots
// ------------------------------------------------------------------------
/**
* This method is called when the operator should do a snapshot, before it emits its own
* checkpoint barrier.
*
* <p>This method is intended not for any actual state persistence, but only for emitting some
* data before emitting the checkpoint barrier. Operators that maintain some small transient
* state that is inefficient to checkpoint (especially when it would need to be checkpointed in
* a re-scalable way) but can simply be sent downstream before the checkpoint. An example are
* opportunistic pre-aggregation operators, which have small the pre-aggregation state that is
* frequently flushed downstream.
*
* <p><b>Important:</b> This method should not be used for any actual state snapshot logic,
* because it will inherently be within the synchronous part of the operator's checkpoint. If
* heavy work is done within this method, it will affect latency and downstream checkpoint
* alignments.
*
* @param checkpointId The ID of the checkpoint.
* @throws Exception Throwing an exception here causes the operator to fail and go into
* recovery.
*/
void prepareSnapshotPreBarrier(long checkpointId) throws Exception;
/**
* Called to draw a state snapshot from the operator.
*
* @return a runnable future to the state handle that points to the snapshotted state. For
* synchronous implementations, the runnable might already be finished.
* @throws Exception exception that happened during snapshotting.
*/
OperatorSnapshotFutures snapshotState(
long checkpointId,
long timestamp,
CheckpointOptions checkpointOptions,
CheckpointStreamFactory storageLocation)
throws Exception;
/** Provides a context to initialize all state in the operator. */
void initializeState(StreamTaskStateInitializer streamTaskStateManager) throws Exception;
// ------------------------------------------------------------------------
// miscellaneous
// ------------------------------------------------------------------------
void setKeyContextElement1(StreamRecord<?> record) throws Exception;
void setKeyContextElement2(StreamRecord<?> record) throws Exception;
OperatorMetricGroup getMetricGroup();
OperatorID getOperatorID();
/**
* Called to get the OperatorAttributes of the operator. If there is no defined attribute, a
* default OperatorAttributes is built.
*
* @return OperatorAttributes of the operator.
*/
@Experimental
default OperatorAttributes getOperatorAttributes() {
return new OperatorAttributesBuilder().build();
}
}
|
StreamOperator
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/orm/jpa/PersistenceUnitInfoImpl.java
|
{
"start": 930,
"end": 4531
}
|
class ____ implements PersistenceUnitInfo {
private final String name;
private final Properties properties = new Properties();
private String scopeAnnotationName;
private List<String> qualifierAnnotationNames = List.of();
private SharedCacheMode cacheMode;
private ValidationMode validationMode;
@SuppressWarnings("removal")
private PersistenceUnitTransactionType transactionType;
private List<String> mappingFiles;
private List<String> managedClassNames;
private boolean excludeUnlistedClasses;
private ClassLoader classLoader;
public PersistenceUnitInfoImpl(String name) {
this.name = name;
}
@Override
public String getPersistenceUnitName() {
return name;
}
@Override
public String getScopeAnnotationName() {
return scopeAnnotationName;
}
public void setScopeAnnotationName(String scopeAnnotationName) {
this.scopeAnnotationName = scopeAnnotationName;
}
@Override
public List<String> getQualifierAnnotationNames() {
return qualifierAnnotationNames;
}
public void setQualifierAnnotationNames(List<String> qualifierAnnotationNames) {
this.qualifierAnnotationNames = qualifierAnnotationNames;
}
@Override
public Properties getProperties() {
return properties;
}
@Override
public String getPersistenceProviderClassName() {
return HibernatePersistenceProvider.class.getName();
}
@Override @SuppressWarnings("removal")
public PersistenceUnitTransactionType getTransactionType() {
return transactionType;
}
public void setTransactionType(@SuppressWarnings("removal") PersistenceUnitTransactionType transactionType) {
this.transactionType = transactionType;
}
@Override
public SharedCacheMode getSharedCacheMode() {
return cacheMode;
}
public void setCacheMode(SharedCacheMode cacheMode) {
this.cacheMode = cacheMode;
}
@Override
public ValidationMode getValidationMode() {
return validationMode;
}
public void setValidationMode(ValidationMode validationMode) {
this.validationMode = validationMode;
}
@Override
public List<String> getMappingFileNames() {
return mappingFiles == null ? emptyList() : mappingFiles;
}
public void applyMappingFiles(String... mappingFiles) {
if ( this.mappingFiles == null ) {
this.mappingFiles = new ArrayList<>();
}
Collections.addAll( this.mappingFiles, mappingFiles );
}
@Override
public List<String> getManagedClassNames() {
return managedClassNames == null ? emptyList() : managedClassNames;
}
public void applyManagedClassNames(String... managedClassNames) {
if ( this.managedClassNames == null ) {
this.managedClassNames = new ArrayList<>();
}
Collections.addAll( this.managedClassNames, managedClassNames );
}
@Override
public boolean excludeUnlistedClasses() {
return excludeUnlistedClasses;
}
public void setExcludeUnlistedClasses(boolean excludeUnlistedClasses) {
this.excludeUnlistedClasses = excludeUnlistedClasses;
}
@Override
public ClassLoader getClassLoader() {
return classLoader;
}
public void setClassLoader(ClassLoader classLoader) {
this.classLoader = classLoader;
}
@Override
public String getPersistenceXMLSchemaVersion() {
return null;
}
@Override
public DataSource getJtaDataSource() {
return null;
}
@Override
public DataSource getNonJtaDataSource() {
return null;
}
@Override
public List<URL> getJarFileUrls() {
return null;
}
@Override
public URL getPersistenceUnitRootUrl() {
return null;
}
@Override
public void addTransformer(ClassTransformer transformer) {
}
@Override
public ClassLoader getNewTempClassLoader() {
return null;
}
}
|
PersistenceUnitInfoImpl
|
java
|
dropwizard__dropwizard
|
dropwizard-example/src/test/java/com/example/helloworld/DockerIntegrationTest.java
|
{
"start": 3475,
"end": 6838
}
|
class ____ {
@Test
void validDateParameter() {
final String date = APP.client().target("http://localhost:" + APP.getLocalPort() + "/hello-world/date")
.queryParam("date", "2022-01-20")
.request()
.get(String.class);
assertThat(date).isEqualTo("2022-01-20");
}
@ParameterizedTest
@ValueSource(strings = {"null", "abc", "0"})
void invalidDateParameter(String value) {
assertThatExceptionOfType(BadRequestException.class)
.isThrownBy(() -> APP.client().target("http://localhost:" + APP.getLocalPort() + "/hello-world/date")
.queryParam("date", value)
.request()
.get(String.class));
}
@Test
void noDateParameter() {
final String date = APP.client().target("http://localhost:" + APP.getLocalPort() + "/hello-world/date")
.request()
.get(String.class);
assertThat(date).isEmpty();
}
}
@Test
void testPostPerson() {
final Person person = new Person("Dr. IntegrationTest", "Chief Wizard", 1525);
final Person newPerson = postPerson(person);
assertThat(newPerson.getFullName()).isEqualTo(person.getFullName());
assertThat(newPerson.getJobTitle()).isEqualTo(person.getJobTitle());
}
@ParameterizedTest
@ValueSource(strings={"view_freemarker", "view_mustache"})
void testRenderingPerson(String viewName) {
final Person person = new Person("Dr. IntegrationTest", "Chief Wizard", 1525);
final Person newPerson = postPerson(person);
final String url = "http://localhost:" + APP.getLocalPort() + "/people/" + newPerson.getId() + "/" + viewName;
Response response = APP.client().target(url).request().get();
assertThat(response.getStatus()).isEqualTo(HttpStatus.OK_200);
}
private Person postPerson(Person person) {
return APP.client().target("http://localhost:" + APP.getLocalPort() + "/people")
.request()
.post(Entity.entity(person, MediaType.APPLICATION_JSON_TYPE))
.readEntity(Person.class);
}
@Test
void testLogFileWritten() {
// The log file is using a size and time based policy, which used to silently
// fail (and not write to a log file). This test ensures not only that the
// log file exists, but also contains the log line that jetty prints on startup
assertThat(new File(CURRENT_LOG.get()))
.exists()
.content()
.contains("Starting hello-world",
"Started application@",
"0.0.0.0:" + APP.getLocalPort(),
"Started admin@",
"0.0.0.0:" + APP.getAdminPort())
.doesNotContain("ERROR", "FATAL", "Exception");
}
@Test
void healthCheckShouldSucceed() {
final Response healthCheckResponse =
APP.client().target("http://localhost:" + APP.getLocalPort() + "/health-check")
.request()
.get();
assertThat(healthCheckResponse)
.extracting(Response::getStatus)
.isEqualTo(Response.Status.OK.getStatusCode());
}
}
|
DateParameterTests
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/main/java/io/quarkus/arc/deployment/staticmethods/InterceptedStaticMethodBuildItem.java
|
{
"start": 415,
"end": 1788
}
|
class ____ extends MultiBuildItem {
private final MethodInfo method;
private final List<InterceptorInfo> interceptors;
private final Set<AnnotationInstance> bindings;
private final String hash;
InterceptedStaticMethodBuildItem(MethodInfo method, Set<AnnotationInstance> bindings, List<InterceptorInfo> interceptors) {
this.method = method;
this.interceptors = interceptors;
this.bindings = bindings;
this.hash = HashUtil.sha1(method.declaringClass().name().toString() + method.toString());
}
public ClassInfo getTarget() {
return method.declaringClass();
}
public MethodInfo getMethod() {
return method;
}
/**
*
* @return the list of interceptors that should be applied
*/
public List<InterceptorInfo> getInterceptors() {
return interceptors;
}
/**
*
* @return the set of interceptor bindings
*/
public Set<AnnotationInstance> getBindings() {
return bindings;
}
/**
*
* @return the unique hash that could be used to identify the method
*/
public String getHash() {
return hash;
}
/**
*
* @return the name of the generated forwarding method
*/
public String getForwardingMethodName() {
return "_" + hash;
}
}
|
InterceptedStaticMethodBuildItem
|
java
|
apache__camel
|
components/camel-ftp/src/main/java/org/apache/camel/component/file/remote/RemoteFileConsumer.java
|
{
"start": 1447,
"end": 1500
}
|
class ____ remote file consumers.
*/
public abstract
|
for
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/stream/state/OuterJoinRecordStateViews.java
|
{
"start": 2038,
"end": 3380
}
|
class ____ {
/** Creates a {@link OuterJoinRecordStateView} depends on {@link JoinInputSideSpec}. */
public static OuterJoinRecordStateView create(
RuntimeContext ctx,
String stateName,
JoinInputSideSpec inputSideSpec,
InternalTypeInfo<RowData> recordType,
long retentionTime) {
StateTtlConfig ttlConfig = createTtlConfig(retentionTime);
if (inputSideSpec.hasUniqueKey()) {
if (inputSideSpec.joinKeyContainsUniqueKey()) {
return new OuterJoinRecordStateViews.JoinKeyContainsUniqueKey(
ctx, stateName, recordType, ttlConfig);
} else {
return new OuterJoinRecordStateViews.InputSideHasUniqueKey(
ctx,
stateName,
recordType,
inputSideSpec.getUniqueKeyType(),
inputSideSpec.getUniqueKeySelector(),
ttlConfig);
}
} else {
return new OuterJoinRecordStateViews.InputSideHasNoUniqueKey(
ctx, stateName, recordType, ttlConfig);
}
}
// ------------------------------------------------------------------------------------------
private static final
|
OuterJoinRecordStateViews
|
java
|
apache__kafka
|
metadata/src/test/java/org/apache/kafka/metadata/LeaderRecoveryStateTest.java
|
{
"start": 1207,
"end": 3313
}
|
class ____ {
private static final byte NO_CHANGE = (byte) -1;
@Test
void testUniqueValues() {
Set<Byte> set = new HashSet<>();
for (LeaderRecoveryState recovery : LeaderRecoveryState.values()) {
assertTrue(
set.add(recovery.value()),
String.format("Value %s for election state %s has already been used", recovery.value(), recovery)
);
}
}
@Test
void testDoesNotContainNoChange() {
for (LeaderRecoveryState recovery : LeaderRecoveryState.values()) {
assertNotEquals(NO_CHANGE, recovery.value());
}
}
@Test
void testByteToLeaderRecoveryState() {
assertEquals(LeaderRecoveryState.RECOVERED, LeaderRecoveryState.of((byte) 0));
assertEquals(LeaderRecoveryState.RECOVERING, LeaderRecoveryState.of((byte) 1));
}
@Test
void testLeaderRecoveryStateValue() {
assertEquals(0, LeaderRecoveryState.RECOVERED.value());
assertEquals(1, LeaderRecoveryState.RECOVERING.value());
}
@Test
void testInvalidValue() {
assertThrows(
IllegalArgumentException.class,
() -> LeaderRecoveryState.of(NO_CHANGE)
);
assertThrows(IllegalArgumentException.class, () -> LeaderRecoveryState.of((byte) 2));
}
@Test
void testOptionalInvalidValue() {
assertEquals(Optional.empty(), LeaderRecoveryState.optionalOf(NO_CHANGE));
assertEquals(Optional.empty(), LeaderRecoveryState.optionalOf((byte) 2));
}
@Test
void testChangeTo() {
LeaderRecoveryState state = LeaderRecoveryState.RECOVERED;
assertEquals(LeaderRecoveryState.RECOVERED, state.changeTo(NO_CHANGE));
state = state.changeTo(LeaderRecoveryState.RECOVERING.value());
assertEquals(LeaderRecoveryState.RECOVERING, state);
assertEquals(LeaderRecoveryState.RECOVERING, state.changeTo(NO_CHANGE));
state = state.changeTo(LeaderRecoveryState.RECOVERED.value());
assertEquals(LeaderRecoveryState.RECOVERED, state);
}
}
|
LeaderRecoveryStateTest
|
java
|
elastic__elasticsearch
|
test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/ExceptionUtils.java
|
{
"start": 528,
"end": 788
}
|
class ____ {
private ExceptionUtils() {}
public static Throwable findRootCause(Throwable t) {
Throwable cause = t.getCause();
if (cause == null) {
return t;
}
return findRootCause(cause);
}
}
|
ExceptionUtils
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/MultipartWebClientIntegrationTests.java
|
{
"start": 11615,
"end": 12165
}
|
class ____ {
private String fieldPart;
private List<FilePart> fileParts;
public String getFieldPart() {
return this.fieldPart;
}
public void setFieldPart(String fieldPart) {
this.fieldPart = fieldPart;
}
public List<FilePart> getFileParts() {
return this.fileParts;
}
public void setFileParts(List<FilePart> fileParts) {
this.fileParts = fileParts;
}
@Override
public String toString() {
return "FormBean[" + getFieldPart() + "," + partListDescription(getFileParts()) + "]";
}
}
private static
|
FormBean
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/internal/operators/observable/ObservableRefCountTest.java
|
{
"start": 41838,
"end": 44377
}
|
class ____<T> extends ConnectableObservable<T> {
volatile boolean reset;
@Override
public void reset() {
reset = true;
}
@Override
public void connect(Consumer<? super Disposable> connection) {
// not relevant
}
@Override
protected void subscribeActual(Observer<? super T> observer) {
// not relevant
}
}
@Test
public void timeoutResetsSource() {
TestConnectableObservable<Object> tco = new TestConnectableObservable<>();
ObservableRefCount<Object> o = (ObservableRefCount<Object>)tco.refCount();
RefConnection rc = new RefConnection(o);
rc.set(Disposable.empty());
o.connection = rc;
o.timeout(rc);
assertTrue(tco.reset);
}
@Test
public void disconnectBeforeConnect() {
BehaviorSubject<Integer> subject = BehaviorSubject.create();
Observable<Integer> observable = subject
.replay(1)
.refCount();
observable.takeUntil(Observable.just(1)).test();
subject.onNext(2);
observable.take(1).test().assertResult(2);
}
@Test
public void publishRefCountShallBeThreadSafe() {
for (int i = 0; i < TestHelper.RACE_LONG_LOOPS; i++) {
Observable<Integer> observable = Observable.just(1).publish().refCount();
TestObserver<Integer> observer1 = observable
.subscribeOn(Schedulers.io())
.test();
TestObserver<Integer> observer2 = observable
.subscribeOn(Schedulers.io())
.test();
observer1
.withTag("observer1 " + i)
.awaitDone(5, TimeUnit.SECONDS)
.assertNoErrors()
.assertComplete();
observer2
.withTag("observer2 " + i)
.awaitDone(5, TimeUnit.SECONDS)
.assertNoErrors()
.assertComplete();
}
}
@Test
public void upstreamTerminationTriggersAnotherCancel() throws Exception {
ReplaySubject<Integer> rs = ReplaySubject.create();
rs.onNext(1);
rs.onComplete();
Observable<Integer> shared = rs.share();
shared
.buffer(shared.debounce(5, TimeUnit.SECONDS))
.test()
.assertValueCount(2);
shared
.buffer(shared.debounce(5, TimeUnit.SECONDS))
.test()
.assertValueCount(2);
}
}
|
TestConnectableObservable
|
java
|
quarkusio__quarkus
|
integration-tests/kafka-sasl-elytron/src/test/java/io/quarkus/it/kafka/KafkaSaslTestResource.java
|
{
"start": 460,
"end": 3294
}
|
class ____ implements QuarkusTestResourceLifecycleManager {
private final Logger log = Logger.getLogger(KafkaSaslTestResource.class);
private StrimziKafkaContainer kafka;
private KerberosContainer kerberos;
@Override
public Map<String, String> start() {
Map<String, String> properties = new HashMap<>();
//Start kerberos container
kerberos = new KerberosContainer("gcavalcante8808/krb5-server");
kerberos.start();
log.info(kerberos.getLogs());
kerberos.createTestPrincipals();
kerberos.createKrb5File();
properties.put("java.security.krb5.conf", "src/test/resources/krb5.conf");
//Start kafka container
kafka = new StrimziKafkaContainer()
.withBrokerId(0)
.withBootstrapServers(
c -> String.format("SASL_PLAINTEXT://%s:%s", c.getHost(), c.getMappedPort(KAFKA_PORT)))
.withKafkaConfigurationMap(Map.ofEntries(
entry("listener.security.protocol.map",
"SASL_PLAINTEXT:SASL_PLAINTEXT,BROKER1:PLAINTEXT,CONTROLLER:PLAINTEXT"),
entry("inter.broker.listener.name", "SASL_PLAINTEXT"),
entry("sasl.enabled.mechanisms", "GSSAPI"),
entry("sasl.mechanism.inter.broker.protocol", "GSSAPI"),
entry("listener.name.sasl_plaintext.gssapi.sasl.jaas.config",
"com.sun.security.auth.module.Krb5LoginModule required " +
"useKeyTab=true storeKey=true debug=true serviceName=\"kafka\" " +
"keyTab=\"/opt/kafka/config/kafkabroker.keytab\" " +
"principal=\"kafka/localhost@EXAMPLE.COM\";"),
entry("sasl.kerberos.service.name", "kafka"),
entry("ssl.endpoint.identification.algorithm", "https"),
entry("ssl.client.auth", "none")))
.withPort(KAFKA_PORT)
.withCopyFileToContainer(MountableFile.forClasspathResource("krb5KafkaBroker.conf"),
"/etc/krb5.conf")
.withCopyFileToContainer(MountableFile.forHostPath("target/kafkabroker.keytab"),
"/opt/kafka/config/kafkabroker.keytab");
kafka.start();
log.info(kafka.getLogs());
properties.put("kafka.bootstrap.servers", kafka.getBootstrapServers());
return properties;
}
@Override
public void stop() {
if (kafka != null) {
kafka.close();
kafka.stop();
}
if (kerberos != null) {
kerberos.close();
kerberos.stop();
}
}
}
|
KafkaSaslTestResource
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/entities/ids/MulId.java
|
{
"start": 240,
"end": 1238
}
|
class ____ implements Serializable {
private Integer id1;
private Integer id2;
public MulId() {
}
public MulId(Integer id1, Integer id2) {
this.id1 = id1;
this.id2 = id2;
}
public Integer getId1() {
return id1;
}
public void setId1(Integer id1) {
this.id1 = id1;
}
public Integer getId2() {
return id2;
}
public void setId2(Integer id2) {
this.id2 = id2;
}
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( !(o instanceof MulId) ) {
return false;
}
MulId mulId = (MulId) o;
if ( id1 != null ? !id1.equals( mulId.id1 ) : mulId.id1 != null ) {
return false;
}
if ( id2 != null ? !id2.equals( mulId.id2 ) : mulId.id2 != null ) {
return false;
}
return true;
}
public int hashCode() {
int result;
result = (id1 != null ? id1.hashCode() : 0);
result = 31 * result + (id2 != null ? id2.hashCode() : 0);
return result;
}
public String toString() {
return "MulId(" + id1 + ", " + id2 + ")";
}
}
|
MulId
|
java
|
apache__maven
|
its/core-it-support/core-it-plugins/maven-it-plugin-error/src/main/java/org/apache/maven/plugin/coreit/NoClassDefFoundErrorInterfaceMojo.java
|
{
"start": 1162,
"end": 1340
}
|
class ____ won't be
* loadable when that dependency is missing (in the runtime environment).
*/
@Mojo(name = "no-class-def-found-error-mojo", requiresProject = false)
public
|
itself
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/JUnit4TestNotRunTest.java
|
{
"start": 5874,
"end": 6442
}
|
class ____ {
// BUG: Diagnostic contains: @Test
public void shouldDoSomething() {
checkState(false);
}
}
""")
.doTest();
}
@Test
public void containsQualifiedCheck_shouldBeTest() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.common.base.Preconditions;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@RunWith(JUnit4.class)
public
|
Test
|
java
|
apache__kafka
|
core/src/main/java/kafka/docker/Log4jConfiguration.java
|
{
"start": 3954,
"end": 4537
}
|
class ____ {
private String level;
private final Map<String, Object> otherProperties = new LinkedHashMap<>();
public String getLevel() {
return level;
}
public void setLevel(String level) {
this.level = level;
}
@JsonAnyGetter
public Map<String, Object> getOtherProperties() {
return otherProperties;
}
@JsonAnySetter
public void setOtherProperties(String key, Object value) {
otherProperties.put(key, value);
}
}
@JsonPropertyOrder({ "name", "level" })
@JsonIgnoreProperties(ignoreUnknown = true)
|
Root
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/fastjson/deserializer/issues3796/bean/ObjectY.java
|
{
"start": 96,
"end": 1377
}
|
class ____ {
private List<ObjectY_A> a;
private long b;
private int c = 0;
private boolean d = false;
private int e = -1;
private int f = 0;
private int g = 0;
private int h;
private boolean i =false;
private List<Integer> j;
private List<Integer> k;
public List<ObjectY_A> getA() {
return a;
}
public void setA(List<ObjectY_A> a) {
this.a = a;
}
public long getB() {
return b;
}
public void setB(long b) {
this.b = b;
}
public int getC() {
return c;
}
public void setC(int c) {
this.c = c;
}
public boolean isD() {
return d;
}
public void setD(boolean d) {
this.d = d;
}
public int getE() {
return e;
}
public void setE(int e) {
this.e = e;
}
public int getF() {
return f;
}
public void setF(int f) {
this.f = f;
}
public int getG() {
return g;
}
public void setG(int g) {
this.g = g;
}
public int getH() {
return h;
}
public void setH(int h) {
this.h = h;
}
public boolean isI() {
return i;
}
public void setI(boolean i) {
this.i = i;
}
public List<Integer> getJ() {
return j;
}
public void setJ(List<Integer> j) {
this.j = j;
}
public List<Integer> getK() {
return k;
}
public void setK(List<Integer> k) {
this.k = k;
}
}
|
ObjectY
|
java
|
quarkusio__quarkus
|
extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/graph/GraphQueryResponseItem.java
|
{
"start": 1586,
"end": 1922
}
|
interface ____ extends GraphQueryResponseItem {
long id();
String type();
long source();
long destination();
List<ScalarItem> properties();
ScalarItem get(String property);
@Override
default Kind kind() {
return Kind.RELATION;
}
}
}
|
RelationItem
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java
|
{
"start": 1870,
"end": 2036
}
|
class ____ extends ESTestCase {
private static final Literal FIVE = of(5);
private static final Literal SIX = of(6);
public static final
|
OptimizerRulesTests
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshotWithRandomECPolicy.java
|
{
"start": 1074,
"end": 1940
}
|
class ____ extends
TestErasureCodingPolicyWithSnapshot {
private static final Logger LOG = LoggerFactory.getLogger(
TestErasureCodingPolicyWithSnapshotWithRandomECPolicy.class);
private ErasureCodingPolicy ecPolicy;
public TestErasureCodingPolicyWithSnapshotWithRandomECPolicy() {
// If you want to debug this test with a specific ec policy, please use
// SystemErasureCodingPolicies class.
// e.g. ecPolicy = SystemErasureCodingPolicies.getByID(RS_3_2_POLICY_ID);
ecPolicy = StripedFileTestUtil.getRandomNonDefaultECPolicy();
LOG.info("run {} with {}.",
TestErasureCodingPolicyWithSnapshotWithRandomECPolicy.class
.getSuperclass().getSimpleName(), ecPolicy.getName());
}
@Override
public ErasureCodingPolicy getEcPolicy() {
return ecPolicy;
}
}
|
TestErasureCodingPolicyWithSnapshotWithRandomECPolicy
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/bug/Bug_for_jared1.java
|
{
"start": 189,
"end": 483
}
|
class ____ extends TestCase {
public void test_for_jared1() throws Exception {
User user = new User();
String text = JSON.toJSONString(user);
System.out.println(text);
JSON.parseObject(text, User.class);
}
public static
|
Bug_for_jared1
|
java
|
quarkusio__quarkus
|
extensions/security/deployment/src/test/java/io/quarkus/security/test/permissionsallowed/ClassLevelComputedPermissionsAllowedTest.java
|
{
"start": 864,
"end": 2150
}
|
class ____ {
private static final String IGNORED = "ignored";
private static final Set<Permission> CHECKING_PERMISSION = Set.of(new Permission("permission_name") {
@Override
public boolean implies(Permission permission) {
return permission.implies(this);
}
@Override
public boolean equals(Object obj) {
return false;
}
@Override
public int hashCode() {
return 0;
}
@Override
public String getActions() {
return null;
}
});
private static final String SUCCESS = "success";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(IdentityMock.class, AuthData.class, SecurityTestUtils.class));
@Inject
AutodetectParamsBean autodetectParamsBean;
@Inject
ExplicitlyMatchedParamsBean explicitlyMatchedParamsBean;
@Test
public void testAutodetectedParams() {
var anonymous = new AuthData(null, true, null, CHECKING_PERMISSION);
var user = new AuthData(Collections.singleton("user"), false, "user", CHECKING_PERMISSION);
// secured
|
ClassLevelComputedPermissionsAllowedTest
|
java
|
apache__kafka
|
group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/StreamsCoordinatorRecordHelpersTest.java
|
{
"start": 3241,
"end": 40404
}
|
class ____ {
public static final String CLIENT_HOST = "client-host";
public static final String CLIENT_ID = "client-id";
public static final String CONFIG_NAME_1 = "config-name1";
public static final String CONFIG_NAME_2 = "config-name2";
public static final String CONFIG_VALUE_1 = "config-value1";
public static final String CONFIG_VALUE_2 = "config-value2";
public static final String GROUP_ID = "group-id";
public static final String INSTANCE_ID = "instance-id";
public static final String MEMBER_ID = "member-id";
public static final String PROCESS_ID = "process-id";
public static final String RACK_1 = "rack1";
public static final String RACK_2 = "rack2";
public static final String RACK_3 = "rack3";
public static final String SUBTOPOLOGY_1 = "subtopology1";
public static final String SUBTOPOLOGY_2 = "subtopology2";
public static final String SUBTOPOLOGY_3 = "subtopology3";
public static final String TAG_1 = "tag1";
public static final String TAG_2 = "tag2";
public static final String TOPIC_1 = "topic1";
public static final String TOPIC_2 = "topic2";
public static final String TOPIC_BAR = "bar";
public static final String TOPIC_CHANGELOG = "changelog";
public static final String TOPIC_FOO = "foo";
public static final String TOPIC_REGEX = "regex";
public static final String TOPIC_REPARTITION = "repartition";
public static final String USER_ENDPOINT = "user-endpoint";
public static final String VALUE_1 = "value1";
public static final String VALUE_2 = "value2";
public static final int REBALANCE_TIMEOUT_MS = 1000;
public static final int USER_ENDPOINT_PORT = 40;
@Test
public void testNewStreamsGroupMemberRecord() {
StreamsGroupMember member = new StreamsGroupMember.Builder(MEMBER_ID)
.setRackId(RACK_1)
.setInstanceId(INSTANCE_ID)
.setClientId(CLIENT_ID)
.setClientHost(CLIENT_HOST)
.setRebalanceTimeoutMs(REBALANCE_TIMEOUT_MS)
.setTopologyEpoch(1)
.setProcessId(PROCESS_ID)
.setUserEndpoint(new StreamsGroupMemberMetadataValue.Endpoint().setHost(USER_ENDPOINT).setPort(USER_ENDPOINT_PORT))
.setClientTags(Map.of(TAG_1, VALUE_1, TAG_2, VALUE_2))
.build();
CoordinatorRecord expectedRecord = CoordinatorRecord.record(
new StreamsGroupMemberMetadataKey()
.setGroupId(GROUP_ID)
.setMemberId(MEMBER_ID),
new ApiMessageAndVersion(
new StreamsGroupMemberMetadataValue()
.setRackId(RACK_1)
.setInstanceId(INSTANCE_ID)
.setClientId(CLIENT_ID)
.setClientHost(CLIENT_HOST)
.setRebalanceTimeoutMs(REBALANCE_TIMEOUT_MS)
.setTopologyEpoch(1)
.setProcessId(PROCESS_ID)
.setUserEndpoint(new StreamsGroupMemberMetadataValue.Endpoint().setHost(USER_ENDPOINT).setPort(USER_ENDPOINT_PORT))
.setClientTags(List.of(
new StreamsGroupMemberMetadataValue.KeyValue().setKey(TAG_1).setValue(VALUE_1),
new StreamsGroupMemberMetadataValue.KeyValue().setKey(TAG_2).setValue(VALUE_2)
)),
(short) 0
)
);
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupMemberRecord(GROUP_ID, member));
}
@Test
public void testNewStreamsGroupMemberRecordWithNullRackId() {
StreamsGroupMember member = new StreamsGroupMember.Builder(MEMBER_ID)
.setRackId(null)
.setInstanceId(INSTANCE_ID)
.setClientId(CLIENT_ID)
.setClientHost(CLIENT_HOST)
.setRebalanceTimeoutMs(REBALANCE_TIMEOUT_MS)
.setTopologyEpoch(1)
.setProcessId(PROCESS_ID)
.setUserEndpoint(new StreamsGroupMemberMetadataValue.Endpoint().setHost(USER_ENDPOINT).setPort(USER_ENDPOINT_PORT))
.setClientTags(Map.of(TAG_1, VALUE_1, TAG_2, VALUE_2))
.build();
CoordinatorRecord expectedRecord = CoordinatorRecord.record(
new StreamsGroupMemberMetadataKey()
.setGroupId(GROUP_ID)
.setMemberId(MEMBER_ID),
new ApiMessageAndVersion(
new StreamsGroupMemberMetadataValue()
.setRackId(null)
.setInstanceId(INSTANCE_ID)
.setClientId(CLIENT_ID)
.setClientHost(CLIENT_HOST)
.setRebalanceTimeoutMs(REBALANCE_TIMEOUT_MS)
.setTopologyEpoch(1)
.setProcessId(PROCESS_ID)
.setUserEndpoint(new StreamsGroupMemberMetadataValue.Endpoint().setHost(USER_ENDPOINT).setPort(USER_ENDPOINT_PORT))
.setClientTags(List.of(
new StreamsGroupMemberMetadataValue.KeyValue().setKey(TAG_1).setValue(VALUE_1),
new StreamsGroupMemberMetadataValue.KeyValue().setKey(TAG_2).setValue(VALUE_2)
)),
(short) 0
)
);
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupMemberRecord(GROUP_ID, member));
}
@Test
public void testNewStreamsGroupMemberRecordWithNullInstanceId() {
StreamsGroupMember member = new StreamsGroupMember.Builder(MEMBER_ID)
.setRackId(RACK_1)
.setInstanceId(null)
.setClientId(CLIENT_ID)
.setClientHost(CLIENT_HOST)
.setRebalanceTimeoutMs(REBALANCE_TIMEOUT_MS)
.setTopologyEpoch(1)
.setProcessId(PROCESS_ID)
.setUserEndpoint(new StreamsGroupMemberMetadataValue.Endpoint().setHost(USER_ENDPOINT).setPort(USER_ENDPOINT_PORT))
.setClientTags(Map.of(TAG_1, VALUE_1, TAG_2, VALUE_2))
.build();
CoordinatorRecord expectedRecord = CoordinatorRecord.record(
new StreamsGroupMemberMetadataKey()
.setGroupId(GROUP_ID)
.setMemberId(MEMBER_ID),
new ApiMessageAndVersion(
new StreamsGroupMemberMetadataValue()
.setRackId(RACK_1)
.setInstanceId(null)
.setClientId(CLIENT_ID)
.setClientHost(CLIENT_HOST)
.setRebalanceTimeoutMs(REBALANCE_TIMEOUT_MS)
.setTopologyEpoch(1)
.setProcessId(PROCESS_ID)
.setUserEndpoint(new StreamsGroupMemberMetadataValue.Endpoint().setHost(USER_ENDPOINT).setPort(USER_ENDPOINT_PORT))
.setClientTags(List.of(
new StreamsGroupMemberMetadataValue.KeyValue().setKey(TAG_1).setValue(VALUE_1),
new StreamsGroupMemberMetadataValue.KeyValue().setKey(TAG_2).setValue(VALUE_2)
)),
(short) 0
)
);
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupMemberRecord(GROUP_ID, member));
}
@Test
public void testNewStreamsGroupMemberRecordWithNullUserEndpoint() {
StreamsGroupMember member = new StreamsGroupMember.Builder(MEMBER_ID)
.setRackId(RACK_1)
.setInstanceId(INSTANCE_ID)
.setClientId(CLIENT_ID)
.setClientHost(CLIENT_HOST)
.setRebalanceTimeoutMs(REBALANCE_TIMEOUT_MS)
.setTopologyEpoch(1)
.setProcessId(PROCESS_ID)
.setUserEndpoint(null)
.setClientTags(Map.of(TAG_1, VALUE_1, TAG_2, VALUE_2))
.build();
CoordinatorRecord expectedRecord = CoordinatorRecord.record(
new StreamsGroupMemberMetadataKey()
.setGroupId(GROUP_ID)
.setMemberId(MEMBER_ID),
new ApiMessageAndVersion(
new StreamsGroupMemberMetadataValue()
.setRackId(RACK_1)
.setInstanceId(INSTANCE_ID)
.setClientId(CLIENT_ID)
.setClientHost(CLIENT_HOST)
.setRebalanceTimeoutMs(REBALANCE_TIMEOUT_MS)
.setTopologyEpoch(1)
.setProcessId(PROCESS_ID)
.setUserEndpoint(null)
.setClientTags(List.of(
new StreamsGroupMemberMetadataValue.KeyValue().setKey(TAG_1).setValue(VALUE_1),
new StreamsGroupMemberMetadataValue.KeyValue().setKey(TAG_2).setValue(VALUE_2)
)),
(short) 0
)
);
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupMemberRecord(GROUP_ID, member));
}
@Test
public void testNewStreamsGroupMemberTombstoneRecord() {
CoordinatorRecord expectedRecord = CoordinatorRecord.tombstone(
new StreamsGroupMemberMetadataKey()
.setGroupId(GROUP_ID)
.setMemberId(MEMBER_ID)
);
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupMemberTombstoneRecord(GROUP_ID, MEMBER_ID));
}
@Test
public void testNewStreamsGroupMetadataRecordWithNullAssignmentConfig() {
assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupMetadataRecord(GROUP_ID, 42, 43, 44, null));
}
@Test
public void testNewStreamsGroupMetadataRecord() {
List<StreamsGroupMetadataValue.LastAssignmentConfig> expectedAssignmentConfigs = List.of(
new StreamsGroupMetadataValue.LastAssignmentConfig()
.setKey("num.standby.replicas")
.setValue("2")
);
CoordinatorRecord expectedRecord = CoordinatorRecord.record(
new StreamsGroupMetadataKey()
.setGroupId(GROUP_ID),
new ApiMessageAndVersion(
new StreamsGroupMetadataValue()
.setEpoch(42)
.setMetadataHash(43)
.setValidatedTopologyEpoch(44)
.setLastAssignmentConfigs(expectedAssignmentConfigs),
(short) 0
)
);
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupMetadataRecord(GROUP_ID, 42, 43, 44, Map.of(
"num.standby.replicas", "2"
)));
}
@Test
public void testNewStreamsGroupEpochTombstoneRecord() {
CoordinatorRecord expectedRecord = CoordinatorRecord.tombstone(
new StreamsGroupMetadataKey()
.setGroupId(GROUP_ID)
);
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupEpochTombstoneRecord(GROUP_ID));
}
@Test
public void testNewStreamsGroupTargetAssignmentRecord() {
Map<String, Set<Integer>> activeTasks = Map.of(SUBTOPOLOGY_1, Set.of(1, 2, 3));
Map<String, Set<Integer>> standbyTasks = Map.of(SUBTOPOLOGY_2, Set.of(4, 5, 6));
Map<String, Set<Integer>> warmupTasks = Map.of(SUBTOPOLOGY_3, Set.of(7, 8, 9));
CoordinatorRecord expectedRecord = CoordinatorRecord.record(
new StreamsGroupTargetAssignmentMemberKey()
.setGroupId(GROUP_ID)
.setMemberId(MEMBER_ID),
new ApiMessageAndVersion(
new StreamsGroupTargetAssignmentMemberValue()
.setActiveTasks(List.of(
new StreamsGroupTargetAssignmentMemberValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_1)
.setPartitions(List.of(1, 2, 3))
))
.setStandbyTasks(List.of(
new StreamsGroupTargetAssignmentMemberValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_2)
.setPartitions(List.of(4, 5, 6))
))
.setWarmupTasks(List.of(
new StreamsGroupTargetAssignmentMemberValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_3)
.setPartitions(List.of(7, 8, 9))
)),
(short) 0
)
);
assertEquals(expectedRecord,
StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentRecord(GROUP_ID, MEMBER_ID,
new TasksTuple(activeTasks, standbyTasks, warmupTasks)));
}
@ParameterizedTest
@EnumSource(TaskRole.class)
public void testNewStreamsGroupTargetAssignmentRecordWithEmptyTaskIds(TaskRole taskRole) {
final StreamsGroupTargetAssignmentMemberValue targetAssignmentMemberValue = new StreamsGroupTargetAssignmentMemberValue();
final List<TaskIds> taskIds = List.of(new TaskIds().setSubtopologyId(SUBTOPOLOGY_1).setPartitions(List.of(1, 2, 3)));
switch (taskRole) {
case ACTIVE:
targetAssignmentMemberValue.setActiveTasks(taskIds);
break;
case STANDBY:
targetAssignmentMemberValue.setStandbyTasks(taskIds);
break;
case WARMUP:
targetAssignmentMemberValue.setWarmupTasks(taskIds);
break;
}
CoordinatorRecord expectedRecord = CoordinatorRecord.record(
new StreamsGroupTargetAssignmentMemberKey()
.setGroupId(GROUP_ID)
.setMemberId(MEMBER_ID),
new ApiMessageAndVersion(
targetAssignmentMemberValue,
(short) 0
)
);
assertEquals(expectedRecord,
StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentRecord(GROUP_ID, MEMBER_ID,
mkTasksTuple(taskRole, mkTasks(SUBTOPOLOGY_1, 1, 2, 3))));
}
@Test
public void testNewStreamsGroupTargetAssignmentTombstoneRecord() {
CoordinatorRecord expectedRecord = CoordinatorRecord.tombstone(
new StreamsGroupTargetAssignmentMemberKey()
.setGroupId(GROUP_ID)
.setMemberId(MEMBER_ID)
);
assertEquals(expectedRecord,
StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentTombstoneRecord(GROUP_ID, MEMBER_ID));
}
@Test
public void testNewStreamsGroupTargetAssignmentEpochRecord() {
CoordinatorRecord expectedRecord = CoordinatorRecord.record(
new StreamsGroupTargetAssignmentMetadataKey()
.setGroupId(GROUP_ID),
new ApiMessageAndVersion(
new StreamsGroupTargetAssignmentMetadataValue()
.setAssignmentEpoch(42),
(short) 0
)
);
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentEpochRecord(GROUP_ID, 42));
}
@Test
public void testNewStreamsGroupTargetAssignmentEpochTombstoneRecord() {
CoordinatorRecord expectedRecord = CoordinatorRecord.tombstone(
new StreamsGroupTargetAssignmentMetadataKey()
.setGroupId(GROUP_ID)
);
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentEpochTombstoneRecord(GROUP_ID));
}
@Test
public void testNewStreamsGroupCurrentAssignmentRecord() {
StreamsGroupMember member = new StreamsGroupMember.Builder(MEMBER_ID)
.setRackId(RACK_1)
.setInstanceId(INSTANCE_ID)
.setClientId(CLIENT_ID)
.setClientHost(CLIENT_HOST)
.setRebalanceTimeoutMs(REBALANCE_TIMEOUT_MS)
.setMemberEpoch(1)
.setPreviousMemberEpoch(0)
.setState(MemberState.STABLE)
.setTopologyEpoch(1)
.setProcessId(PROCESS_ID)
.setUserEndpoint(new Endpoint().setHost(USER_ENDPOINT).setPort(USER_ENDPOINT_PORT))
.setClientTags(Map.of(TAG_1, VALUE_1, TAG_2, VALUE_2))
.setAssignedTasks(new TasksTupleWithEpochs(
mkTasksWithEpochsPerSubtopology(
mkTasksWithEpochs(SUBTOPOLOGY_1, Map.of(1, 10, 2, 11, 3, 12))
),
mkTasksPerSubtopology(mkTasks(SUBTOPOLOGY_2, 4, 5, 6)),
mkTasksPerSubtopology(mkTasks(SUBTOPOLOGY_3, 7, 8, 9))
))
.setTasksPendingRevocation(new TasksTupleWithEpochs(
mkTasksWithEpochsPerSubtopology(
mkTasksWithEpochs(SUBTOPOLOGY_1, Map.of(1, 5, 2, 6, 3, 7))
),
mkTasksPerSubtopology(mkTasks(SUBTOPOLOGY_2, 4, 5, 6)),
mkTasksPerSubtopology(mkTasks(SUBTOPOLOGY_3, 7, 8, 9))
))
.build();
CoordinatorRecord expectedRecord = CoordinatorRecord.record(
new StreamsGroupCurrentMemberAssignmentKey()
.setGroupId(GROUP_ID)
.setMemberId(MEMBER_ID),
new ApiMessageAndVersion(
new StreamsGroupCurrentMemberAssignmentValue()
.setMemberEpoch(1)
.setPreviousMemberEpoch(0)
.setState(MemberState.STABLE.value())
.setActiveTasks(List.of(
new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_1)
.setPartitions(List.of(1, 2, 3))
.setAssignmentEpochs(List.of(10, 11, 12))
))
.setStandbyTasks(List.of(
new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_2)
.setPartitions(List.of(4, 5, 6))
))
.setWarmupTasks(List.of(
new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_3)
.setPartitions(List.of(7, 8, 9))
))
.setActiveTasksPendingRevocation(List.of(
new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_1)
.setPartitions(List.of(1, 2, 3))
.setAssignmentEpochs(List.of(5, 6, 7))
))
.setStandbyTasksPendingRevocation(List.of(
new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_2)
.setPartitions(List.of(4, 5, 6))
))
.setWarmupTasksPendingRevocation(List.of(
new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_3)
.setPartitions(List.of(7, 8, 9))
)),
(short) 0
)
);
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupCurrentAssignmentRecord(GROUP_ID, member));
}
@Test
public void testNewStreamsGroupCurrentAssignmentRecordWithEmptyAssignment() {
StreamsGroupMember member = new StreamsGroupMember.Builder(MEMBER_ID)
.setRackId(RACK_1)
.setInstanceId(INSTANCE_ID)
.setClientId(CLIENT_ID)
.setClientHost(CLIENT_HOST)
.setRebalanceTimeoutMs(REBALANCE_TIMEOUT_MS)
.setMemberEpoch(1)
.setPreviousMemberEpoch(0)
.setState(MemberState.STABLE)
.setTopologyEpoch(1)
.setProcessId(PROCESS_ID)
.setUserEndpoint(new Endpoint().setHost(USER_ENDPOINT).setPort(USER_ENDPOINT_PORT))
.setClientTags(Map.of(TAG_1, VALUE_1, TAG_2, VALUE_2))
.setAssignedTasks(TasksTupleWithEpochs.EMPTY)
.setTasksPendingRevocation(TasksTupleWithEpochs.EMPTY)
.build();
CoordinatorRecord expectedRecord = CoordinatorRecord.record(
new StreamsGroupCurrentMemberAssignmentKey()
.setGroupId(GROUP_ID)
.setMemberId(MEMBER_ID),
new ApiMessageAndVersion(
new StreamsGroupCurrentMemberAssignmentValue()
.setMemberEpoch(1)
.setPreviousMemberEpoch(0)
.setState(MemberState.STABLE.value())
.setActiveTasks(List.of())
.setStandbyTasks(List.of())
.setWarmupTasks(List.of())
.setActiveTasksPendingRevocation(List.of())
.setStandbyTasksPendingRevocation(List.of())
.setWarmupTasksPendingRevocation(List.of()),
(short) 0
)
);
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupCurrentAssignmentRecord(GROUP_ID, member));
}
@Test
public void testNewStreamsGroupCurrentAssignmentTombstoneRecord() {
CoordinatorRecord expectedRecord = CoordinatorRecord.tombstone(
new StreamsGroupCurrentMemberAssignmentKey()
.setGroupId(GROUP_ID)
.setMemberId(MEMBER_ID)
);
assertEquals(expectedRecord,
StreamsCoordinatorRecordHelpers.newStreamsGroupCurrentAssignmentTombstoneRecord(GROUP_ID, MEMBER_ID));
}
@Test
public void testNewStreamsGroupTopologyRecord() {
StreamsGroupHeartbeatRequestData.Topology topology =
new StreamsGroupHeartbeatRequestData.Topology()
.setEpoch(42)
.setSubtopologies(
List.of(new StreamsGroupHeartbeatRequestData.Subtopology()
.setSubtopologyId(SUBTOPOLOGY_1)
.setRepartitionSinkTopics(List.of(TOPIC_FOO))
.setSourceTopics(List.of(TOPIC_BAR))
.setSourceTopicRegex(List.of(TOPIC_REGEX))
.setRepartitionSourceTopics(
List.of(
new StreamsGroupHeartbeatRequestData.TopicInfo()
.setName(TOPIC_REPARTITION)
.setPartitions(4)
.setReplicationFactor((short) 3)
.setTopicConfigs(List.of(
new StreamsGroupHeartbeatRequestData.KeyValue()
.setKey(CONFIG_NAME_1)
.setValue(CONFIG_VALUE_1)
))
)
)
.setStateChangelogTopics(
List.of(
new StreamsGroupHeartbeatRequestData.TopicInfo()
.setName(TOPIC_CHANGELOG)
.setReplicationFactor((short) 2)
.setTopicConfigs(List.of(
new StreamsGroupHeartbeatRequestData.KeyValue()
.setKey(CONFIG_NAME_2)
.setValue(CONFIG_VALUE_2)
))
)
)
.setCopartitionGroups(List.of(
new StreamsGroupHeartbeatRequestData.CopartitionGroup()
.setSourceTopics(List.of((short) 0))
.setRepartitionSourceTopics(List.of((short) 0)),
new StreamsGroupHeartbeatRequestData.CopartitionGroup()
.setSourceTopicRegex(List.of((short) 0))
)),
new StreamsGroupHeartbeatRequestData.Subtopology()
.setSubtopologyId(SUBTOPOLOGY_1)
.setRepartitionSinkTopics(List.of())
.setSourceTopics(List.of(TOPIC_BAR))
.setSourceTopicRegex(List.of())
.setRepartitionSourceTopics(List.of())
.setStateChangelogTopics(List.of())
.setCopartitionGroups(List.of())
)
);
StreamsGroupTopologyValue expectedTopology =
new StreamsGroupTopologyValue()
.setEpoch(42)
.setSubtopologies(
List.of(new StreamsGroupTopologyValue.Subtopology()
.setSubtopologyId(SUBTOPOLOGY_1)
.setRepartitionSinkTopics(List.of(TOPIC_FOO))
.setSourceTopics(List.of(TOPIC_BAR))
.setSourceTopicRegex(List.of(TOPIC_REGEX))
.setRepartitionSourceTopics(
List.of(
new StreamsGroupTopologyValue.TopicInfo()
.setName(TOPIC_REPARTITION)
.setPartitions(4)
.setReplicationFactor((short) 3)
.setTopicConfigs(List.of(
new StreamsGroupTopologyValue.TopicConfig()
.setKey(CONFIG_NAME_1)
.setValue(CONFIG_VALUE_1)
))
)
)
.setStateChangelogTopics(
List.of(
new StreamsGroupTopologyValue.TopicInfo()
.setName(TOPIC_CHANGELOG)
.setReplicationFactor((short) 2)
.setTopicConfigs(List.of(
new StreamsGroupTopologyValue.TopicConfig()
.setKey(CONFIG_NAME_2)
.setValue(CONFIG_VALUE_2)
))
)
)
.setCopartitionGroups(List.of(
new StreamsGroupTopologyValue.CopartitionGroup()
.setSourceTopics(List.of((short) 0))
.setRepartitionSourceTopics(List.of((short) 0)),
new StreamsGroupTopologyValue.CopartitionGroup()
.setSourceTopicRegex(List.of((short) 0))
)),
new StreamsGroupTopologyValue.Subtopology()
.setSubtopologyId(SUBTOPOLOGY_1)
.setRepartitionSinkTopics(List.of())
.setSourceTopics(List.of(TOPIC_BAR))
.setSourceTopicRegex(List.of())
.setRepartitionSourceTopics(List.of())
.setStateChangelogTopics(List.of())
.setCopartitionGroups(List.of())
)
);
CoordinatorRecord expectedRecord = CoordinatorRecord.record(
new StreamsGroupTopologyKey()
.setGroupId(GROUP_ID),
new ApiMessageAndVersion(
expectedTopology,
(short) 0));
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupTopologyRecord(GROUP_ID, topology));
}
@Test
public void testNewStreamsGroupTopologyRecordTombstone() {
CoordinatorRecord expectedRecord = CoordinatorRecord.tombstone(
new StreamsGroupTopologyKey()
.setGroupId(GROUP_ID)
);
assertEquals(expectedRecord, StreamsCoordinatorRecordHelpers.newStreamsGroupTopologyRecordTombstone(GROUP_ID));
}
@Test
public void testNewStreamsGroupMemberRecordNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupMemberRecord(null, mock(StreamsGroupMember.class)));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupMemberRecordNullMember() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupMemberRecord("groupId", null));
assertEquals("member should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupMemberTombstoneRecordNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupMemberTombstoneRecord(null, "memberId"));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupMemberTombstoneRecordNullMemberId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupMemberTombstoneRecord("groupId", null));
assertEquals("memberId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupMetadataRecordNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupMetadataRecord(null, 1, 1, 1, Map.of()));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupEpochTombstoneRecordNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupEpochTombstoneRecord(null));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupTargetAssignmentRecordNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentRecord(null, "memberId", mock(TasksTuple.class)));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupTargetAssignmentRecordNullMemberId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentRecord("groupId", null, mock(TasksTuple.class)));
assertEquals("memberId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupTargetAssignmentRecordNullAssignment() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentRecord("groupId", "memberId", null));
assertEquals("assignment should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupTargetAssignmentTombstoneRecordNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentTombstoneRecord(null, "memberId"));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupTargetAssignmentTombstoneRecordNullMemberId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentTombstoneRecord("groupId", null));
assertEquals("memberId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupTargetAssignmentEpochRecordNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentEpochRecord(null, 1));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupTargetAssignmentEpochTombstoneRecordNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentEpochTombstoneRecord(null));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupCurrentAssignmentRecordNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupCurrentAssignmentRecord(null, mock(StreamsGroupMember.class)));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupCurrentAssignmentRecordNullMember() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupCurrentAssignmentRecord("groupId", null));
assertEquals("member should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupCurrentAssignmentTombstoneRecordNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupCurrentAssignmentTombstoneRecord(null, "memberId"));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupCurrentAssignmentTombstoneRecordNullMemberId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupCurrentAssignmentTombstoneRecord("groupId", null));
assertEquals("memberId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupTopologyRecordWithValueNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupTopologyRecord(null, mock(StreamsGroupTopologyValue.class)));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupTopologyRecordWithTopologyNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupTopologyRecord(null, mock(StreamsGroupHeartbeatRequestData.Topology.class)));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupTopologyRecordNullTopology() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupTopologyRecord("groupId", (StreamsGroupHeartbeatRequestData.Topology) null));
assertEquals("topology should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupTopologyRecordNullValue() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupTopologyRecord("groupId", (StreamsGroupTopologyValue) null));
assertEquals("value should not be null here", exception.getMessage());
}
@Test
public void testNewStreamsGroupTopologyRecordTombstoneNullGroupId() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.newStreamsGroupTopologyRecordTombstone(null));
assertEquals("groupId should not be null here", exception.getMessage());
}
@Test
public void testConvertToStreamsGroupTopologyRecordNullTopology() {
NullPointerException exception = assertThrows(NullPointerException.class, () ->
StreamsCoordinatorRecordHelpers.convertToStreamsGroupTopologyRecord(null));
assertEquals("topology should not be null here", exception.getMessage());
}
}
|
StreamsCoordinatorRecordHelpersTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java
|
{
"start": 33008,
"end": 34326
}
|
class ____ extends DataFrameDataExtractor {
private Queue<SearchResponse> responses = new LinkedList<>();
private List<SearchRequestBuilder> capturedSearchRequests = new ArrayList<>();
private SearchResponse alwaysResponse;
TestExtractor(Client client, DataFrameDataExtractorContext context) {
super(client, context);
}
void setNextResponse(SearchResponse searchResponse) {
if (alwaysResponse != null) {
throw new IllegalStateException("Should not set next response when an always response has been set");
}
responses.add(searchResponse);
}
void setAlwaysResponse(SearchResponse searchResponse) {
alwaysResponse = searchResponse;
}
@Override
protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) {
capturedSearchRequests.add(searchRequestBuilder);
SearchResponse searchResponse = alwaysResponse == null ? responses.remove() : alwaysResponse;
if (searchResponse.getShardFailures() != null) {
throw new RuntimeException(searchResponse.getShardFailures()[0].getCause());
}
return searchResponse;
}
}
private static
|
TestExtractor
|
java
|
grpc__grpc-java
|
okhttp/third_party/okhttp/main/java/io/grpc/okhttp/internal/framed/Hpack.java
|
{
"start": 1319,
"end": 6977
}
|
class ____ {
private static final int PREFIX_4_BITS = 0x0f;
private static final int PREFIX_5_BITS = 0x1f;
private static final int PREFIX_6_BITS = 0x3f;
private static final int PREFIX_7_BITS = 0x7f;
private static final ByteString PSEUDO_PREFIX = ByteString.encodeUtf8(":");
private static final int SETTINGS_HEADER_TABLE_SIZE = 4_096;
/**
* The decoder has ultimate control of the maximum size of the dynamic table but we can choose
* to use less. We'll put a cap at 16K. This is arbitrary but should be enough for most purposes.
*/
private static final int SETTINGS_HEADER_TABLE_SIZE_LIMIT = 16_384;
private static final io.grpc.okhttp.internal.framed.Header[] STATIC_HEADER_TABLE = new io.grpc.okhttp.internal.framed.Header[] {
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.TARGET_AUTHORITY, ""),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.TARGET_METHOD, "GET"),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.TARGET_METHOD, "POST"),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.TARGET_PATH, "/"),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.TARGET_PATH, "/index.html"),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.TARGET_SCHEME, "http"),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.TARGET_SCHEME, "https"),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.RESPONSE_STATUS, "200"),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.RESPONSE_STATUS, "204"),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.RESPONSE_STATUS, "206"),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.RESPONSE_STATUS, "304"),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.RESPONSE_STATUS, "400"),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.RESPONSE_STATUS, "404"),
new io.grpc.okhttp.internal.framed.Header(io.grpc.okhttp.internal.framed.Header.RESPONSE_STATUS, "500"),
new io.grpc.okhttp.internal.framed.Header("accept-charset", ""),
new io.grpc.okhttp.internal.framed.Header("accept-encoding", "gzip, deflate"),
new io.grpc.okhttp.internal.framed.Header("accept-language", ""),
new io.grpc.okhttp.internal.framed.Header("accept-ranges", ""),
new io.grpc.okhttp.internal.framed.Header("accept", ""),
new io.grpc.okhttp.internal.framed.Header("access-control-allow-origin", ""),
new io.grpc.okhttp.internal.framed.Header("age", ""),
new io.grpc.okhttp.internal.framed.Header("allow", ""),
new io.grpc.okhttp.internal.framed.Header("authorization", ""),
new io.grpc.okhttp.internal.framed.Header("cache-control", ""),
new io.grpc.okhttp.internal.framed.Header("content-disposition", ""),
new io.grpc.okhttp.internal.framed.Header("content-encoding", ""),
new io.grpc.okhttp.internal.framed.Header("content-language", ""),
new io.grpc.okhttp.internal.framed.Header("content-length", ""),
new io.grpc.okhttp.internal.framed.Header("content-location", ""),
new io.grpc.okhttp.internal.framed.Header("content-range", ""),
new io.grpc.okhttp.internal.framed.Header("content-type", ""),
new io.grpc.okhttp.internal.framed.Header("cookie", ""),
new io.grpc.okhttp.internal.framed.Header("date", ""),
new io.grpc.okhttp.internal.framed.Header("etag", ""),
new io.grpc.okhttp.internal.framed.Header("expect", ""),
new io.grpc.okhttp.internal.framed.Header("expires", ""),
new io.grpc.okhttp.internal.framed.Header("from", ""),
new io.grpc.okhttp.internal.framed.Header("host", ""),
new io.grpc.okhttp.internal.framed.Header("if-match", ""),
new io.grpc.okhttp.internal.framed.Header("if-modified-since", ""),
new io.grpc.okhttp.internal.framed.Header("if-none-match", ""),
new io.grpc.okhttp.internal.framed.Header("if-range", ""),
new io.grpc.okhttp.internal.framed.Header("if-unmodified-since", ""),
new io.grpc.okhttp.internal.framed.Header("last-modified", ""),
new io.grpc.okhttp.internal.framed.Header("link", ""),
new io.grpc.okhttp.internal.framed.Header("location", ""),
new io.grpc.okhttp.internal.framed.Header("max-forwards", ""),
new io.grpc.okhttp.internal.framed.Header("proxy-authenticate", ""),
new io.grpc.okhttp.internal.framed.Header("proxy-authorization", ""),
new io.grpc.okhttp.internal.framed.Header("range", ""),
new io.grpc.okhttp.internal.framed.Header("referer", ""),
new io.grpc.okhttp.internal.framed.Header("refresh", ""),
new io.grpc.okhttp.internal.framed.Header("retry-after", ""),
new io.grpc.okhttp.internal.framed.Header("server", ""),
new io.grpc.okhttp.internal.framed.Header("set-cookie", ""),
new io.grpc.okhttp.internal.framed.Header("strict-transport-security", ""),
new io.grpc.okhttp.internal.framed.Header("transfer-encoding", ""),
new io.grpc.okhttp.internal.framed.Header("user-agent", ""),
new io.grpc.okhttp.internal.framed.Header("vary", ""),
new io.grpc.okhttp.internal.framed.Header("via", ""),
new io.grpc.okhttp.internal.framed.Header("www-authenticate", "")
};
private Hpack() {
}
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-12#section-3.1
static final
|
Hpack
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/NettyHttpEndpointBuilderFactory.java
|
{
"start": 1581,
"end": 27970
}
|
interface ____
extends
EndpointConsumerBuilder {
default AdvancedNettyHttpEndpointConsumerBuilder advanced() {
return (AdvancedNettyHttpEndpointConsumerBuilder) this;
}
/**
* If the option is true, the producer will ignore the
* NettyHttpConstants.HTTP_URI header, and use the endpoint's URI for
* request. You may also set the throwExceptionOnFailure to be false to
* let the producer send all the fault response back. The consumer
* working in the bridge mode will skip the gzip compression and WWW URL
* form encoding (by adding the Exchange.SKIP_GZIP_ENCODING and
* Exchange.SKIP_WWW_FORM_URLENCODED headers to the consumed exchange).
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param bridgeEndpoint the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder bridgeEndpoint(boolean bridgeEndpoint) {
doSetProperty("bridgeEndpoint", bridgeEndpoint);
return this;
}
/**
* If the option is true, the producer will ignore the
* NettyHttpConstants.HTTP_URI header, and use the endpoint's URI for
* request. You may also set the throwExceptionOnFailure to be false to
* let the producer send all the fault response back. The consumer
* working in the bridge mode will skip the gzip compression and WWW URL
* form encoding (by adding the Exchange.SKIP_GZIP_ENCODING and
* Exchange.SKIP_WWW_FORM_URLENCODED headers to the consumed exchange).
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param bridgeEndpoint the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder bridgeEndpoint(String bridgeEndpoint) {
doSetProperty("bridgeEndpoint", bridgeEndpoint);
return this;
}
/**
* Whether or not to disconnect(close) from Netty Channel right after
* use.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param disconnect the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder disconnect(boolean disconnect) {
doSetProperty("disconnect", disconnect);
return this;
}
/**
* Whether or not to disconnect(close) from Netty Channel right after
* use.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param disconnect the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder disconnect(String disconnect) {
doSetProperty("disconnect", disconnect);
return this;
}
/**
* Setting to ensure socket is not closed due to inactivity.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param keepAlive the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder keepAlive(boolean keepAlive) {
doSetProperty("keepAlive", keepAlive);
return this;
}
/**
* Setting to ensure socket is not closed due to inactivity.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param keepAlive the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder keepAlive(String keepAlive) {
doSetProperty("keepAlive", keepAlive);
return this;
}
/**
* Setting to facilitate socket multiplexing.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param reuseAddress the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder reuseAddress(boolean reuseAddress) {
doSetProperty("reuseAddress", reuseAddress);
return this;
}
/**
* Setting to facilitate socket multiplexing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param reuseAddress the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder reuseAddress(String reuseAddress) {
doSetProperty("reuseAddress", reuseAddress);
return this;
}
/**
* This option allows producers and consumers (in client mode) to reuse
* the same Netty Channel for the lifecycle of processing the Exchange.
* This is useful if you need to call a server multiple times in a Camel
* route and want to use the same network connection. When using this,
* the channel is not returned to the connection pool until the Exchange
* is done; or disconnected if the disconnect option is set to true. The
* reused Channel is stored on the Exchange as an exchange property with
* the key CamelNettyChannel which allows you to obtain the channel
* during routing and use it as well.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param reuseChannel the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder reuseChannel(boolean reuseChannel) {
doSetProperty("reuseChannel", reuseChannel);
return this;
}
/**
* This option allows producers and consumers (in client mode) to reuse
* the same Netty Channel for the lifecycle of processing the Exchange.
* This is useful if you need to call a server multiple times in a Camel
* route and want to use the same network connection. When using this,
* the channel is not returned to the connection pool until the Exchange
* is done; or disconnected if the disconnect option is set to true. The
* reused Channel is stored on the Exchange as an exchange property with
* the key CamelNettyChannel which allows you to obtain the channel
* during routing and use it as well.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param reuseChannel the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder reuseChannel(String reuseChannel) {
doSetProperty("reuseChannel", reuseChannel);
return this;
}
/**
* Setting to set endpoint as one-way (false) or request-response
* (true).
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param sync the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder sync(boolean sync) {
doSetProperty("sync", sync);
return this;
}
/**
* Setting to set endpoint as one-way (false) or request-response
* (true).
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param sync the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder sync(String sync) {
doSetProperty("sync", sync);
return this;
}
/**
* Setting to improve TCP protocol performance.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param tcpNoDelay the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder tcpNoDelay(boolean tcpNoDelay) {
doSetProperty("tcpNoDelay", tcpNoDelay);
return this;
}
/**
* Setting to improve TCP protocol performance.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param tcpNoDelay the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder tcpNoDelay(String tcpNoDelay) {
doSetProperty("tcpNoDelay", tcpNoDelay);
return this;
}
/**
* Whether or not Camel should try to find a target consumer by matching
* the URI prefix if no exact match is found.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param matchOnUriPrefix the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder matchOnUriPrefix(boolean matchOnUriPrefix) {
doSetProperty("matchOnUriPrefix", matchOnUriPrefix);
return this;
}
/**
* Whether or not Camel should try to find a target consumer by matching
* the URI prefix if no exact match is found.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param matchOnUriPrefix the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder matchOnUriPrefix(String matchOnUriPrefix) {
doSetProperty("matchOnUriPrefix", matchOnUriPrefix);
return this;
}
/**
* If enabled and an Exchange failed processing on the consumer side the
* response's body won't contain the exception's stack trace.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param muteException the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder muteException(boolean muteException) {
doSetProperty("muteException", muteException);
return this;
}
/**
* If enabled and an Exchange failed processing on the consumer side the
* response's body won't contain the exception's stack trace.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param muteException the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder muteException(String muteException) {
doSetProperty("muteException", muteException);
return this;
}
/**
* Whether to send back HTTP status code 503 when the consumer has been
* suspended. If the option is false then the Netty Acceptor is unbound
* when the consumer is suspended, so clients cannot connect anymore.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param send503whenSuspended the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder send503whenSuspended(boolean send503whenSuspended) {
doSetProperty("send503whenSuspended", send503whenSuspended);
return this;
}
/**
* Whether to send back HTTP status code 503 when the consumer has been
* suspended. If the option is false then the Netty Acceptor is unbound
* when the consumer is suspended, so clients cannot connect anymore.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param send503whenSuspended the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder send503whenSuspended(String send503whenSuspended) {
doSetProperty("send503whenSuspended", send503whenSuspended);
return this;
}
/**
* A list of decoders to be used. You can use a String which have values
* separated by comma, and have the values be looked up in the Registry.
* Just remember to prefix the value with # so Camel knows it should
* lookup.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: codec
*
* @param decoders the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder decoders(String decoders) {
doSetProperty("decoders", decoders);
return this;
}
/**
* A list of encoders to be used. You can use a String which have values
* separated by comma, and have the values be looked up in the Registry.
* Just remember to prefix the value with # so Camel knows it should
* lookup.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: codec
*
* @param encoders the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder encoders(String encoders) {
doSetProperty("encoders", encoders);
return this;
}
/**
* Which protocols to enable when using SSL.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: TLSv1.2,TLSv1.3
* Group: security
*
* @param enabledProtocols the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder enabledProtocols(String enabledProtocols) {
doSetProperty("enabledProtocols", enabledProtocols);
return this;
}
/**
* To enable/disable hostname verification on SSLEngine.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param hostnameVerification the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder hostnameVerification(boolean hostnameVerification) {
doSetProperty("hostnameVerification", hostnameVerification);
return this;
}
/**
* To enable/disable hostname verification on SSLEngine.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param hostnameVerification the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder hostnameVerification(String hostnameVerification) {
doSetProperty("hostnameVerification", hostnameVerification);
return this;
}
/**
* Keystore format to be used for payload encryption. Defaults to JKS if
* not set.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param keyStoreFormat the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder keyStoreFormat(String keyStoreFormat) {
doSetProperty("keyStoreFormat", keyStoreFormat);
return this;
}
/**
* Client side certificate keystore to be used for encryption. Is loaded
* by default from classpath, but you can prefix with classpath:, file:,
* or http: to load the resource from different systems.
*
* This option can also be loaded from an existing file, by prefixing
* with file: or classpath: followed by the location of the file.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param keyStoreResource the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder keyStoreResource(String keyStoreResource) {
doSetProperty("keyStoreResource", keyStoreResource);
return this;
}
/**
* Configures whether the server needs client authentication when using
* SSL.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param needClientAuth the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder needClientAuth(boolean needClientAuth) {
doSetProperty("needClientAuth", needClientAuth);
return this;
}
/**
* Configures whether the server needs client authentication when using
* SSL.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param needClientAuth the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder needClientAuth(String needClientAuth) {
doSetProperty("needClientAuth", needClientAuth);
return this;
}
/**
* Password to use for the keyStore and trustStore. The same password
* must be configured for both resources.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param passphrase the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder passphrase(String passphrase) {
doSetProperty("passphrase", passphrase);
return this;
}
/**
* Refers to a
* org.apache.camel.component.netty.http.NettyHttpSecurityConfiguration
* for configuring secure web resources.
*
* The option is a:
* <code>org.apache.camel.component.netty.http.NettyHttpSecurityConfiguration</code> type.
*
* Group: security
*
* @param securityConfiguration the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder securityConfiguration(org.apache.camel.component.netty.http.NettyHttpSecurityConfiguration securityConfiguration) {
doSetProperty("securityConfiguration", securityConfiguration);
return this;
}
/**
* Refers to a
* org.apache.camel.component.netty.http.NettyHttpSecurityConfiguration
* for configuring secure web resources.
*
* The option will be converted to a
* <code>org.apache.camel.component.netty.http.NettyHttpSecurityConfiguration</code> type.
*
* Group: security
*
* @param securityConfiguration the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder securityConfiguration(String securityConfiguration) {
doSetProperty("securityConfiguration", securityConfiguration);
return this;
}
/**
* To configure NettyHttpSecurityConfiguration using key/value pairs
* from the map. This is a multi-value option with prefix:
* securityConfiguration.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* securityOptions(String, Object) method to add a value (call the
* method multiple times to set more values).
*
* Group: security
*
* @param key the option key
* @param value the option value
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder securityOptions(String key, Object value) {
doSetMultiValueProperty("securityOptions", "securityConfiguration." + key, value);
return this;
}
/**
* To configure NettyHttpSecurityConfiguration using key/value pairs
* from the map. This is a multi-value option with prefix:
* securityConfiguration.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* securityOptions(String, Object) method to add a value (call the
* method multiple times to set more values).
*
* Group: security
*
* @param values the values
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder securityOptions(Map values) {
doSetMultiValueProperties("securityOptions", "securityConfiguration.", values);
return this;
}
/**
* Security provider to be used for payload encryption. Defaults to
* SunX509 if not set.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param securityProvider the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder securityProvider(String securityProvider) {
doSetProperty("securityProvider", securityProvider);
return this;
}
/**
* Setting to specify whether SSL encryption is applied to this
* endpoint.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param ssl the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder ssl(boolean ssl) {
doSetProperty("ssl", ssl);
return this;
}
/**
* Setting to specify whether SSL encryption is applied to this
* endpoint.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param ssl the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder ssl(String ssl) {
doSetProperty("ssl", ssl);
return this;
}
/**
* When enabled and in SSL mode, then the Netty consumer will enrich the
* Camel Message with headers having information about the client
* certificate such as subject name, issuer name, serial number, and the
* valid date range.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param sslClientCertHeaders the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder sslClientCertHeaders(boolean sslClientCertHeaders) {
doSetProperty("sslClientCertHeaders", sslClientCertHeaders);
return this;
}
/**
* When enabled and in SSL mode, then the Netty consumer will enrich the
* Camel Message with headers having information about the client
* certificate such as subject name, issuer name, serial number, and the
* valid date range.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param sslClientCertHeaders the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder sslClientCertHeaders(String sslClientCertHeaders) {
doSetProperty("sslClientCertHeaders", sslClientCertHeaders);
return this;
}
/**
* To configure security using SSLContextParameters.
*
* The option is a:
* <code>org.apache.camel.support.jsse.SSLContextParameters</code> type.
*
* Group: security
*
* @param sslContextParameters the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder sslContextParameters(org.apache.camel.support.jsse.SSLContextParameters sslContextParameters) {
doSetProperty("sslContextParameters", sslContextParameters);
return this;
}
/**
* To configure security using SSLContextParameters.
*
* The option will be converted to a
* <code>org.apache.camel.support.jsse.SSLContextParameters</code> type.
*
* Group: security
*
* @param sslContextParameters the value to set
* @return the dsl builder
*/
default NettyHttpEndpointConsumerBuilder sslContextParameters(String sslContextParameters) {
doSetProperty("sslContextParameters", sslContextParameters);
return this;
}
/**
* Reference to a
|
NettyHttpEndpointConsumerBuilder
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/TypeParameterUnusedInFormalsTest.java
|
{
"start": 825,
"end": 1184
}
|
class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(TypeParameterUnusedInFormals.class, getClass());
@Test
public void evilCastImpl() {
compilationHelper
.addSourceLines(
"Test.java",
"""
package foo.bar;
|
TypeParameterUnusedInFormalsTest
|
java
|
apache__flink
|
flink-table/flink-sql-gateway/src/test/java/org/apache/flink/table/gateway/rest/UtilITCase.java
|
{
"start": 1992,
"end": 3459
}
|
class ____ extends RestAPIITCaseBase {
private static final GetInfoHeaders getInfoHeaders = GetInfoHeaders.getInstance();
private static final EmptyRequestBody emptyRequestBody = EmptyRequestBody.getInstance();
private static final EmptyMessageParameters emptyParameters =
EmptyMessageParameters.getInstance();
private static final GetApiVersionHeaders getApiVersionHeaders =
GetApiVersionHeaders.getInstance();
@Test
void testGetInfoAndApiVersion() throws Exception {
CompletableFuture<GetInfoResponseBody> response =
sendRequest(getInfoHeaders, emptyParameters, emptyRequestBody);
String productName = response.get().getProductName();
String version = response.get().getProductVersion();
assertEquals(GetInfoHandler.PRODUCT_NAME, productName);
assertEquals(EnvironmentInformation.getVersion(), version);
CompletableFuture<GetApiVersionResponseBody> response2 =
sendRequest(getApiVersionHeaders, emptyParameters, emptyRequestBody);
List<String> versions = response2.get().getVersions();
assertThat(
Arrays.stream(SqlGatewayRestAPIVersion.values())
.filter(SqlGatewayRestAPIVersion::isStableVersion)
.map(Enum::name)
.collect(Collectors.toList()))
.isEqualTo(versions);
}
}
|
UtilITCase
|
java
|
apache__spark
|
core/src/main/java/org/apache/spark/util/collection/unsafe/sort/PrefixComparators.java
|
{
"start": 2673,
"end": 2847
}
|
class ____ {
public static long computePrefix(UTF8String value) {
return value == null ? 0L : value.getPrefix();
}
}
public static final
|
StringPrefixComparator
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java
|
{
"start": 1419,
"end": 3612
}
|
class ____ extends Suggestion.Entry<PhraseSuggestion.Entry.Option> {
protected double cutoffScore = Double.MIN_VALUE;
public Entry(Text text, int offset, int length, double cutoffScore) {
super(text, offset, length);
this.cutoffScore = cutoffScore;
}
public Entry(Text text, int offset, int length) {
super(text, offset, length);
}
public Entry() {}
public Entry(StreamInput in) throws IOException {
super(in);
cutoffScore = in.readDouble();
}
@Override
protected void merge(Suggestion.Entry<Option> other) {
super.merge(other);
// If the cluster contains both pre 0.90.4 and post 0.90.4 nodes then we'll see Suggestion.Entry
// objects being merged with PhraseSuggestion.Entry objects. We merge Suggestion.Entry objects
// by assuming they had a low cutoff score rather than a high one as that is the more common scenario
// and the simplest one for us to implement.
if ((other instanceof PhraseSuggestion.Entry) == false) {
return;
}
PhraseSuggestion.Entry otherSuggestionEntry = (PhraseSuggestion.Entry) other;
this.cutoffScore = Math.max(this.cutoffScore, otherSuggestionEntry.cutoffScore);
}
@Override
public void addOption(Option option) {
if (option.getScore() > this.cutoffScore) {
this.options.add(option);
}
}
@Override
protected Option newOption(StreamInput in) throws IOException {
return new Option(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeDouble(cutoffScore);
}
@Override
public boolean equals(Object other) {
return super.equals(other) && Objects.equals(cutoffScore, ((Entry) other).cutoffScore);
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), cutoffScore);
}
public static
|
Entry
|
java
|
apache__camel
|
dsl/camel-kamelet-main/src/main/java/org/apache/camel/main/download/BasePackageScanDownloadListener.java
|
{
"start": 9087,
"end": 10248
}
|
class ____
name = StringHelper.decapitalize(name);
}
// must be lazy as we do not know if the bean is in use or not
Supplier<Object> supplier = () -> camelContext.getInjector().newInstance(clazz, true);
bindBean(camelContext, clazz, name, supplier, "Spring @Component/@Service");
}
}
private static void bindBean(CamelContext context, Class<?> type, String name, Supplier<Object> supplier, String kind) {
// to support hot reloading of beans then we need to enable unbind mode in bean post processor
Registry registry = context.getRegistry();
CamelBeanPostProcessor bpp = PluginHelper.getBeanPostProcessor(context);
bpp.setUnbindEnabled(true);
try {
// re-bind the bean to the registry
registry.unbind(name);
LOG.debug("Lazy binding {} bean: {} of type: {}", kind, name, type);
registry.bind(name, type, supplier);
} catch (Exception e) {
throw RuntimeCamelException.wrapRuntimeException(e);
} finally {
bpp.setUnbindEnabled(false);
}
}
}
|
name
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlQueryLogIT.java
|
{
"start": 2600,
"end": 10203
}
|
// Integration test for the ESQL query log ("slow log"): installs a mock log4j
// appender on the ESQL query logger and verifies that queries are logged at the
// expected level once their duration crosses the configured per-level "took"
// thresholds, for both successful and failing queries.
class ____ extends AbstractEsqlIntegTestCase {
// Captures log events emitted by the ESQL query logger during the test.
static MockAppender appender;
static Logger queryLog = LogManager.getLogger(EsqlQueryLog.LOGGER_NAME);
// Original level, restored in cleanup() so other tests are unaffected.
static Level origQueryLogLevel = queryLog.getLevel();
@BeforeClass
public static void init() throws IllegalAccessException {
// Attach the mock appender and open the logger up to TRACE so every
// query-log event is observable by the assertions below.
appender = new MockAppender("trace_appender");
appender.start();
Loggers.addAppender(queryLog, appender);
Loggers.setLevel(queryLog, Level.TRACE);
}
@AfterClass
public static void cleanup() {
// Undo init(): detach the appender and restore the original log level.
Loggers.removeAppender(queryLog, appender);
appender.stop();
Loggers.setLevel(queryLog, origQueryLogLevel);
}
public void testSetLevel() throws Exception {
// Index a random number of docs into two indices so "FROM index-*" has data.
int numDocs1 = randomIntBetween(1, 15);
assertAcked(client().admin().indices().prepareCreate("index-1").setMapping("host", "type=keyword"));
for (int i = 0; i < numDocs1; i++) {
client().prepareIndex("index-1").setSource("host", "192." + i).get();
}
int numDocs2 = randomIntBetween(1, 15);
assertAcked(client().admin().indices().prepareCreate("index-2").setMapping("host", "type=keyword"));
for (int i = 0; i < numDocs2; i++) {
client().prepareIndex("index-2").setSource("host", "10." + i).get();
}
// Pick one node to act as the query coordinator for all requests.
DiscoveryNode coordinator = randomFrom(clusterService().state().nodes().stream().toList());
client().admin().indices().prepareRefresh("index-1", "index-2").get();
// Map each log level to its corresponding query-log threshold setting key.
Map<Level, String> levels = Map.of(
Level.WARN,
EsqlPlugin.ESQL_QUERYLOG_THRESHOLD_WARN_SETTING.getKey(),
Level.INFO,
EsqlPlugin.ESQL_QUERYLOG_THRESHOLD_INFO_SETTING.getKey(),
Level.DEBUG,
EsqlPlugin.ESQL_QUERYLOG_THRESHOLD_DEBUG_SETTING.getKey(),
Level.TRACE,
EsqlPlugin.ESQL_QUERYLOG_THRESHOLD_TRACE_SETTING.getKey()
);
// Successful query: threshold 0ms must always log.
testAllLevels(
levels,
coordinator,
0,
"FROM index-* | EVAL ip = to_ip(host) | STATS s = COUNT(*) by ip | KEEP ip | LIMIT 100",
null,
null
);
// Successful query with random small thresholds: may or may not log.
for (int i = 0; i < 10; i++) {
testAllLevels(
levels,
coordinator,
randomIntBetween(0, 500),
"FROM index-* | EVAL ip = to_ip(host) | STATS s = COUNT(*) by ip | KEEP ip | LIMIT 100",
null,
null
);
}
// Very large threshold (10 minutes): the query should not exceed it.
testAllLevels(
levels,
coordinator,
600_000,
"FROM index-* | EVAL ip = to_ip(host) | STATS s = COUNT(*) by ip | KEEP ip | LIMIT 100",
null,
null
);
// Failing query (verification error): error message and type must be logged.
testAllLevels(
levels,
coordinator,
0,
"FROM index-* | EVAL a = count(*) | LIMIT 100",
"aggregate function [count(*)] not allowed outside STATS command",
VerificationException.class.getName()
);
for (int i = 0; i < 10; i++) {
testAllLevels(
levels,
coordinator,
randomIntBetween(0, 500),
"FROM index-* | EVAL a = count(*) | LIMIT 100",
"aggregate function [count(*)] not allowed outside STATS command",
VerificationException.class.getName()
);
}
testAllLevels(
levels,
coordinator,
600_000,
"FROM index-* | EVAL a = count(*) | LIMIT 100",
"aggregate function [count(*)] not allowed outside STATS command",
VerificationException.class.getName()
);
}
// Runs the given query once per log level: sets that level's threshold setting
// to timeoutMillis, executes the query on the coordinator, and asserts on the
// captured query-log event (took/planning times, query text, level, success
// flag, error message/type). The threshold setting is reset after each level.
private static void testAllLevels(
Map<Level, String> levels,
DiscoveryNode coordinator,
long timeoutMillis,
String query,
String expectedErrorMsg,
String expectedException
) throws InterruptedException, ExecutionException {
for (Map.Entry<Level, String> logLevel : levels.entrySet()) {
// Set only this level's threshold; the others keep their defaults.
client().execute(
ClusterUpdateSettingsAction.INSTANCE,
new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings(
Settings.builder().put(logLevel.getValue(), timeoutMillis + "ms")
)
).get();
EsqlQueryRequest request = syncEsqlQueryRequest(query).pragmas(randomPragmas());
// The listener runs asynchronously; the latch makes the main thread wait
// for the assertions to complete before resetting the setting.
CountDownLatch latch = new CountDownLatch(1);
client(coordinator.getName()).execute(EsqlQueryAction.INSTANCE, request, ActionListener.running(() -> {
try {
if (appender.lastEvent() == null) {
// With a zero threshold a log event is mandatory; with a larger
// threshold the query may legitimately have been fast enough.
if (timeoutMillis == 0) {
fail("Expected a slow log with timeout set to zero");
}
return;
}
var msg = (ESLogMessage) appender.lastMessage();
// "took" is in nanoseconds; the millis field must be its truncation.
long took = Long.valueOf(msg.get(ELASTICSEARCH_QUERYLOG_TOOK));
long tookMillisExpected = took / 1_000_000;
long tookMillis = Long.valueOf(msg.get(ELASTICSEARCH_QUERYLOG_TOOK_MILLIS));
assertThat(took, greaterThan(0L));
assertThat(tookMillis, greaterThanOrEqualTo(timeoutMillis));
assertThat(tookMillis, is(tookMillisExpected));
if (expectedException == null) {
// Planning time is only asserted for successful queries.
long planningTook = Long.valueOf(msg.get(ELASTICSEARCH_QUERYLOG_PLANNING_TOOK));
long planningTookMillisExpected = planningTook / 1_000_000;
long planningTookMillis = Long.valueOf(msg.get(ELASTICSEARCH_QUERYLOG_PLANNING_TOOK_MILLIS));
assertThat(planningTook, greaterThanOrEqualTo(0L));
assertThat(planningTookMillis, is(planningTookMillisExpected));
assertThat(took, greaterThan(planningTook));
}
assertThat(msg.get(ELASTICSEARCH_QUERYLOG_QUERY), is(query));
// The event must have been logged at exactly the level under test;
// getLastEventAndReset also clears the appender for the next level.
assertThat(appender.getLastEventAndReset().getLevel(), equalTo(logLevel.getKey()));
boolean success = Booleans.parseBoolean(msg.get(ELASTICSEARCH_QUERYLOG_SUCCESS));
assertThat(success, is(expectedException == null));
if (expectedErrorMsg == null) {
assertThat(msg.get(ELASTICSEARCH_QUERYLOG_ERROR_MESSAGE), is(nullValue()));
} else {
assertThat(msg.get(ELASTICSEARCH_QUERYLOG_ERROR_MESSAGE), containsString(expectedErrorMsg));
}
if (expectedException == null) {
assertThat(msg.get(ELASTICSEARCH_QUERYLOG_ERROR_TYPE), is(nullValue()));
} else {
assertThat(msg.get(ELASTICSEARCH_QUERYLOG_ERROR_TYPE), is(expectedException));
}
} finally {
// Always release the main thread, even if an assertion failed.
latch.countDown();
}
}));
safeAwait(latch);
assertEquals("All requests must respond", 0, latch.getCount());
// Reset this level's threshold back to its default before the next level.
client().execute(
ClusterUpdateSettingsAction.INSTANCE,
new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings(
Settings.builder().putNull(logLevel.getValue())
)
).get();
}
}
}
|
EsqlQueryLogIT
|
java
|
apache__camel
|
components/camel-jetty/src/test/java/org/apache/camel/component/jetty/HttpTwoServerPortsTest.java
|
{
"start": 1066,
"end": 2685
}
|
/**
 * Verifies that two Jetty consumers can run on different server ports in the
 * same Camel context, each port serving its own route and reply.
 */
class ____ extends BaseJettyTest {

    @Test
    public void testTwoServerPorts() {
        // alternate between both endpoints to prove each port keeps its own handler
        assertEquals("Bye World", template.requestBody("direct:a", "World", String.class));
        assertEquals("Hi Camel", template.requestBody("direct:b", "Camel", String.class));
        assertEquals("Bye Earth", template.requestBody("direct:a", "Earth", String.class));
        assertEquals("Hi Moon", template.requestBody("direct:b", "Moon", String.class));
    }

    @Override
    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            @Override
            public void configure() {
                // producer routes, one per server port
                from("direct:a").to("http://localhost:" + port1 + "/myapp");
                from("direct:b").to("http://localhost:" + port2 + "/myotherapp");
                // consumer on the first port replies with a "Bye " prefix
                from("jetty://http://localhost:" + port1 + "/myapp").process(exchange -> {
                    String body = exchange.getIn().getBody(String.class);
                    exchange.getMessage().setBody("Bye " + body);
                });
                // consumer on the second port replies with a "Hi " prefix
                from("jetty://http://localhost:" + port2 + "/myotherapp").process(exchange -> {
                    String body = exchange.getIn().getBody(String.class);
                    exchange.getMessage().setBody("Hi " + body);
                });
            }
        };
    }
}
|
HttpTwoServerPortsTest
|
java
|
micronaut-projects__micronaut-core
|
core/src/main/java/io/micronaut/core/io/service/SoftServiceLoader.java
|
{
"start": 14207,
"end": 14486
}
|
/**
 * Unchecked exception signalling that a service could not be loaded.
 */
class ____ extends RuntimeException {

    /**
     * @param cause the underlying failure
     */
    public ServiceLoadingException(Throwable cause) {
        super(cause);
    }

    /**
     * @param message detail about the failure
     * @param cause   the underlying failure
     */
    public ServiceLoadingException(String message, Throwable cause) {
        super(message, cause);
    }
}
}
|
ServiceLoadingException
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/web/DefaultOAuth2AuthorizedClientManager.java
|
{
"start": 4235,
"end": 13535
}
|
class ____ implements OAuth2AuthorizedClientManager {
// @formatter:off
private static final OAuth2AuthorizedClientProvider DEFAULT_AUTHORIZED_CLIENT_PROVIDER = OAuth2AuthorizedClientProviderBuilder.builder()
.authorizationCode()
.refreshToken()
.clientCredentials()
.build();
// @formatter:on
private final ClientRegistrationRepository clientRegistrationRepository;
private final OAuth2AuthorizedClientRepository authorizedClientRepository;
private OAuth2AuthorizedClientProvider authorizedClientProvider;
private Function<OAuth2AuthorizeRequest, Map<String, Object>> contextAttributesMapper;
private OAuth2AuthorizationSuccessHandler authorizationSuccessHandler;
private OAuth2AuthorizationFailureHandler authorizationFailureHandler;
/**
 * Constructs a {@code DefaultOAuth2AuthorizedClientManager} using the provided
 * parameters.
 * @param clientRegistrationRepository the repository of client registrations
 * @param authorizedClientRepository the repository of authorized clients
 */
public DefaultOAuth2AuthorizedClientManager(ClientRegistrationRepository clientRegistrationRepository,
OAuth2AuthorizedClientRepository authorizedClientRepository) {
Assert.notNull(clientRegistrationRepository, "clientRegistrationRepository cannot be null");
Assert.notNull(authorizedClientRepository, "authorizedClientRepository cannot be null");
this.clientRegistrationRepository = clientRegistrationRepository;
this.authorizedClientRepository = authorizedClientRepository;
// Default provider chain: authorization_code, refresh_token, client_credentials.
this.authorizedClientProvider = DEFAULT_AUTHORIZED_CLIENT_PROVIDER;
this.contextAttributesMapper = new DefaultContextAttributesMapper();
// Default success handler: persist the authorized client via the repository,
// using the servlet request/response carried in the handler attributes.
this.authorizationSuccessHandler = (authorizedClient, principal, attributes) -> authorizedClientRepository
.saveAuthorizedClient(authorizedClient, principal,
(HttpServletRequest) attributes.get(HttpServletRequest.class.getName()),
(HttpServletResponse) attributes.get(HttpServletResponse.class.getName()));
// Default failure handler: remove the (possibly stale) authorized client
// from the repository when authorization fails.
this.authorizationFailureHandler = new RemoveAuthorizedClientOAuth2AuthorizationFailureHandler(
(clientRegistrationId, principal, attributes) -> authorizedClientRepository.removeAuthorizedClient(
clientRegistrationId, principal,
(HttpServletRequest) attributes.get(HttpServletRequest.class.getName()),
(HttpServletResponse) attributes.get(HttpServletResponse.class.getName())));
}
@Nullable
@Override
public OAuth2AuthorizedClient authorize(OAuth2AuthorizeRequest authorizeRequest) {
Assert.notNull(authorizeRequest, "authorizeRequest cannot be null");
String clientRegistrationId = authorizeRequest.getClientRegistrationId();
OAuth2AuthorizedClient authorizedClient = authorizeRequest.getAuthorizedClient();
Authentication principal = authorizeRequest.getPrincipal();
// Resolve servlet request/response from the request attributes, falling back
// to the current thread's RequestContextHolder (see the helper methods).
HttpServletRequest servletRequest = getHttpServletRequestOrDefault(authorizeRequest.getAttributes());
Assert.notNull(servletRequest, "servletRequest cannot be null");
HttpServletResponse servletResponse = getHttpServletResponseOrDefault(authorizeRequest.getAttributes());
Assert.notNull(servletResponse, "servletResponse cannot be null");
// Build the authorization context from, in order of preference: the client
// supplied on the request, one loaded from the repository, or the bare
// client registration (initial authorization).
OAuth2AuthorizationContext.Builder contextBuilder;
if (authorizedClient != null) {
contextBuilder = OAuth2AuthorizationContext.withAuthorizedClient(authorizedClient);
}
else {
authorizedClient = this.authorizedClientRepository.loadAuthorizedClient(clientRegistrationId, principal,
servletRequest);
if (authorizedClient != null) {
contextBuilder = OAuth2AuthorizationContext.withAuthorizedClient(authorizedClient);
}
else {
ClientRegistration clientRegistration = this.clientRegistrationRepository
.findByRegistrationId(clientRegistrationId);
Assert.notNull(clientRegistration,
"Could not find ClientRegistration with id '" + clientRegistrationId + "'");
contextBuilder = OAuth2AuthorizationContext.withClientRegistration(clientRegistration);
}
}
// @formatter:off
OAuth2AuthorizationContext authorizationContext = contextBuilder.principal(principal)
.attributes((attributes) -> {
// Merge in any attributes supplied by the configured context mapper.
Map<String, Object> contextAttributes = this.contextAttributesMapper.apply(authorizeRequest);
if (!CollectionUtils.isEmpty(contextAttributes)) {
attributes.putAll(contextAttributes);
}
})
.build();
// @formatter:on
try {
// Delegate the actual (re-)authorization to the configured provider.
authorizedClient = this.authorizedClientProvider.authorize(authorizationContext);
}
catch (OAuth2AuthorizationException ex) {
// Notify the failure handler (by default this removes the stored client),
// then propagate the original exception to the caller.
this.authorizationFailureHandler.onAuthorizationFailure(ex, principal,
createAttributes(servletRequest, servletResponse));
throw ex;
}
if (authorizedClient != null) {
// Notify the success handler (by default this saves the client).
this.authorizationSuccessHandler.onAuthorizationSuccess(authorizedClient, principal,
createAttributes(servletRequest, servletResponse));
}
else {
// In the case of re-authorization, the returned `authorizedClient` may be
// null if re-authorization is not supported.
// For these cases, return the provided
// `authorizationContext.authorizedClient`.
if (authorizationContext.getAuthorizedClient() != null) {
return authorizationContext.getAuthorizedClient();
}
}
return authorizedClient;
}
/**
 * Builds the attribute map handed to the success/failure handlers, keyed by
 * the servlet request/response class names.
 */
private static Map<String, Object> createAttributes(HttpServletRequest servletRequest,
        HttpServletResponse servletResponse) {
    Map<String, Object> handlerAttributes = new HashMap<>();
    handlerAttributes.put(HttpServletResponse.class.getName(), servletResponse);
    handlerAttributes.put(HttpServletRequest.class.getName(), servletRequest);
    return handlerAttributes;
}
/**
 * Returns the {@code HttpServletRequest} from the supplied attributes, or
 * falls back to the one bound to the current thread via
 * {@link RequestContextHolder}; may return {@code null} if neither is present.
 */
private static HttpServletRequest getHttpServletRequestOrDefault(Map<String, Object> attributes) {
    HttpServletRequest servletRequest = (HttpServletRequest) attributes.get(HttpServletRequest.class.getName());
    if (servletRequest != null) {
        return servletRequest;
    }
    RequestAttributes requestAttributes = RequestContextHolder.getRequestAttributes();
    if (requestAttributes instanceof ServletRequestAttributes) {
        return ((ServletRequestAttributes) requestAttributes).getRequest();
    }
    return null;
}
/**
 * Returns the {@code HttpServletResponse} from the supplied attributes, or
 * falls back to the one bound to the current thread via
 * {@link RequestContextHolder}; may return {@code null} if neither is present.
 */
private static HttpServletResponse getHttpServletResponseOrDefault(Map<String, Object> attributes) {
    HttpServletResponse servletResponse = (HttpServletResponse) attributes.get(HttpServletResponse.class.getName());
    if (servletResponse != null) {
        return servletResponse;
    }
    RequestAttributes requestAttributes = RequestContextHolder.getRequestAttributes();
    if (requestAttributes instanceof ServletRequestAttributes) {
        return ((ServletRequestAttributes) requestAttributes).getResponse();
    }
    return null;
}
/**
 * Sets the {@link OAuth2AuthorizedClientProvider} used for authorizing (or
 * re-authorizing) an OAuth 2.0 Client.
 * @param authorizedClientProvider the {@link OAuth2AuthorizedClientProvider} used for
 * authorizing (or re-authorizing) an OAuth 2.0 Client
 */
public void setAuthorizedClientProvider(OAuth2AuthorizedClientProvider authorizedClientProvider) {
Assert.notNull(authorizedClientProvider, "authorizedClientProvider cannot be null");
// Replaces the default provider chain (authorization_code, refresh_token,
// client_credentials) installed by the constructor.
this.authorizedClientProvider = authorizedClientProvider;
}
/**
 * Sets the {@code Function} used for mapping attribute(s) from the
 * {@link OAuth2AuthorizeRequest} to a {@code Map} of attributes to be associated to
 * the {@link OAuth2AuthorizationContext#getAttributes() authorization context}.
 * @param contextAttributesMapper the {@code Function} used for supplying the
 * {@code Map} of attributes to the {@link OAuth2AuthorizationContext#getAttributes()
 * authorization context}
 */
public void setContextAttributesMapper(
Function<OAuth2AuthorizeRequest, Map<String, Object>> contextAttributesMapper) {
Assert.notNull(contextAttributesMapper, "contextAttributesMapper cannot be null");
// Replaces the DefaultContextAttributesMapper installed by the constructor.
this.contextAttributesMapper = contextAttributesMapper;
}
/**
 * Sets the {@link OAuth2AuthorizationSuccessHandler} that handles successful
 * authorizations.
 *
 * <p>
 * The default saves {@link OAuth2AuthorizedClient}s in the
 * {@link OAuth2AuthorizedClientRepository}.
 * @param authorizationSuccessHandler the {@link OAuth2AuthorizationSuccessHandler}
 * that handles successful authorizations
 * @since 5.3
 */
public void setAuthorizationSuccessHandler(OAuth2AuthorizationSuccessHandler authorizationSuccessHandler) {
Assert.notNull(authorizationSuccessHandler, "authorizationSuccessHandler cannot be null");
// Replaces the default save-to-repository handler installed by the constructor.
this.authorizationSuccessHandler = authorizationSuccessHandler;
}
/**
 * Sets the {@link OAuth2AuthorizationFailureHandler} that handles authorization
 * failures.
 *
 * <p>
 * A {@link RemoveAuthorizedClientOAuth2AuthorizationFailureHandler} is used by
 * default.
 * @param authorizationFailureHandler the {@link OAuth2AuthorizationFailureHandler}
 * that handles authorization failures
 * @since 5.3
 * @see RemoveAuthorizedClientOAuth2AuthorizationFailureHandler
 */
public void setAuthorizationFailureHandler(OAuth2AuthorizationFailureHandler authorizationFailureHandler) {
Assert.notNull(authorizationFailureHandler, "authorizationFailureHandler cannot be null");
// Replaces the default remove-from-repository handler installed by the constructor.
this.authorizationFailureHandler = authorizationFailureHandler;
}
/**
* The default implementation of the {@link #setContextAttributesMapper(Function)
* contextAttributesMapper}.
*/
public static
|
DefaultOAuth2AuthorizedClientManager
|
java
|
apache__flink
|
flink-table/flink-table-api-scala/src/test/java/org/apache/flink/table/api/typeutils/ScalaEitherSerializerUpgradeTest.java
|
{
"start": 3628,
"end": 4408
}
|
/**
 * Upgrade verifier for the Scala {@code Either} serializer: restores with an
 * (Int, String) {@code EitherSerializer}, expects the test datum to equal
 * {@code Right("Hello world")}, and requires as-is schema compatibility.
 */
class ____
        implements TypeSerializerUpgradeTestBase.UpgradeVerifier<Either<Integer, String>> {

    @Override
    public TypeSerializer<Either<Integer, String>> createUpgradedSerializer() {
        return new EitherSerializer<>(IntSerializer.INSTANCE, StringSerializer.INSTANCE);
    }

    @Override
    public Condition<Either<Integer, String>> testDataCondition() {
        // the restored value must equal Right("Hello world")
        Either<Integer, String> expected = new Right<>("Hello world");
        return new Condition<>(expected::equals, "");
    }

    @Override
    public Condition<TypeSerializerSchemaCompatibility<Either<Integer, String>>>
            schemaCompatibilityCondition(FlinkVersion version) {
        return TypeSerializerConditions.isCompatibleAsIs();
    }
}
}
|
EitherSerializerVerifier
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.