language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/co/CoProcessOperator.java
|
{
"start": 1834,
"end": 3405
}
|
class ____<IN1, IN2, OUT>
extends AbstractUdfStreamOperator<OUT, CoProcessFunction<IN1, IN2, OUT>>
implements TwoInputStreamOperator<IN1, IN2, OUT> {
private static final long serialVersionUID = 1L;
private transient TimestampedCollector<OUT> collector;
private transient ContextImpl context;
/** We listen to this ourselves because we don't have an {@link InternalTimerService}. */
private long currentWatermark = Long.MIN_VALUE;
public CoProcessOperator(CoProcessFunction<IN1, IN2, OUT> flatMapper) {
super(flatMapper);
}
@Override
public void open() throws Exception {
super.open();
collector = new TimestampedCollector<>(output);
context = new ContextImpl(userFunction, getProcessingTimeService());
}
@Override
public void processElement1(StreamRecord<IN1> element) throws Exception {
collector.setTimestamp(element);
context.element = element;
userFunction.processElement1(element.getValue(), context, collector);
context.element = null;
}
@Override
public void processElement2(StreamRecord<IN2> element) throws Exception {
collector.setTimestamp(element);
context.element = element;
userFunction.processElement2(element.getValue(), context, collector);
context.element = null;
}
@Override
public void processWatermark(Watermark mark) throws Exception {
super.processWatermark(mark);
currentWatermark = mark.getTimestamp();
}
private
|
CoProcessOperator
|
java
|
apache__kafka
|
server-common/src/main/java/org/apache/kafka/server/common/UnitTestFeatureVersion.java
|
{
"start": 3755,
"end": 5101
}
|
enum ____ implements FeatureVersion {
UT_FV2_0(0, MetadataVersion.MINIMUM_VERSION, Map.of()),
UT_FV2_1(1, MetadataVersion.IBP_3_7_IV0, Map.of());
private final short featureLevel;
private final MetadataVersion bootstrapMetadataVersion;
private final Map<String, Short> dependencies;
public static final String FEATURE_NAME = "unit.test.feature.version.2";
public static final FV2 LATEST_PRODUCTION = UT_FV2_0;
FV2(int featureLevel, MetadataVersion bootstrapMetadataVersion, Map<String, Short> dependencies) {
this.featureLevel = (short) featureLevel;
this.bootstrapMetadataVersion = bootstrapMetadataVersion;
this.dependencies = dependencies;
}
@Override
public short featureLevel() {
return featureLevel;
}
@Override
public String featureName() {
return FEATURE_NAME;
}
@Override
public MetadataVersion bootstrapMetadataVersion() {
return bootstrapMetadataVersion;
}
@Override
public Map<String, Short> dependencies() {
return dependencies;
}
}
/**
* The feature is used to test the dependency of the latest production that is not yet production ready.
*/
public
|
FV2
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
|
{
"start": 1256,
"end": 2331
}
|
class ____ {
// Set up buffers that have some header and trailer before the
// actual data or checksums, to make sure the code handles
// buffer.position(), limit, etc correctly.
private static final int SUMS_OFFSET_IN_BUFFER = 3;
private static final int DATA_OFFSET_IN_BUFFER = 3;
private static final int DATA_TRAILER_IN_BUFFER = 3;
private static final int BYTES_PER_CHUNK = 512;
private static final DataChecksum.Type CHECKSUM_TYPES[] = {
DataChecksum.Type.CRC32, DataChecksum.Type.CRC32C
};
@Test
public void testBulkOps() throws Exception {
for (DataChecksum.Type type : CHECKSUM_TYPES) {
System.err.println(
"---- beginning tests with checksum type " + type + "----");
DataChecksum checksum = DataChecksum.newDataChecksum(
type, BYTES_PER_CHUNK);
for (boolean useDirect : new boolean[]{false, true}) {
doBulkTest(checksum, 1023, useDirect);
doBulkTest(checksum, 1024, useDirect);
doBulkTest(checksum, 1025, useDirect);
}
}
}
private static
|
TestDataChecksum
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_1200/Issue1254.java
|
{
"start": 213,
"end": 704
}
|
class ____ extends TestCase {
public void test_for_issue() throws Exception {
A a = new A();
a._parentId = "001";
String test = JSON.toJSONString(a);
System.out.println(test);
assertEquals("{\"_parentId\":\"001\"}", test);
B b = new B();
b.set_parentId("001");
String testB = JSON.toJSONString(b);
System.out.println(testB);
assertEquals("{\"_parentId\":\"001\"}", testB);
}
public static
|
Issue1254
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/tools/picocli/CommandLine.java
|
{
"start": 73292,
"end": 74277
}
|
class ____ {
* @Option(names = {"-o", "--output"}, paramLabel="FILE", description="path of the output file")
* private File out;
* @Option(names = {"-j", "--jobs"}, arity="0..1", description="Allow N jobs at once; infinite jobs with no arg.")
* private int maxJobs = -1;
* }</pre>
* <p>By default, the above gives a usage help message like the following:</p><pre>
* Usage: <main class> [OPTIONS]
* -o, --output FILE path of the output file
* -j, --jobs [<maxJobs>] Allow N jobs at once; infinite jobs with no arg.
* </pre>
* @return name of the option parameter used in the usage help message
*/
String paramLabel() default "";
/** <p>
* Optionally specify a {@code type} to control exactly what Class the option parameter should be converted
* to. This may be useful when the field type is an
|
Example
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/version/sybase/Permission.java
|
{
"start": 242,
"end": 1153
}
|
class ____ {
private Integer id;
private Date timestamp;
private String name;
private String context;
private String access;
public Permission() {
}
public Permission(Integer id, String name, String context, String access) {
this.id = id;
this.name = name;
this.context = context;
this.access = access;
}
public Integer getId() {
return id;
}
protected void setId(Integer id) {
this.id = id;
}
public Date getTimestamp() {
return timestamp;
}
public void setTimestamp(Date timestamp) {
this.timestamp = timestamp;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getContext() {
return context;
}
public void setContext(String context) {
this.context = context;
}
public String getAccess() {
return access;
}
public void setAccess(String access) {
this.access = access;
}
}
|
Permission
|
java
|
netty__netty
|
example/src/main/java/io/netty/example/spdy/client/HttpResponseClientHandler.java
|
{
"start": 1401,
"end": 3248
}
|
class ____ extends SimpleChannelInboundHandler<HttpObject> {
private final BlockingQueue<ChannelFuture> queue = new LinkedBlockingQueue<ChannelFuture>();
@Override
public void channelRead0(ChannelHandlerContext ctx, HttpObject msg) throws Exception {
if (msg instanceof HttpResponse) {
HttpResponse response = (HttpResponse) msg;
System.out.println("STATUS: " + response.status());
System.out.println("VERSION: " + response.protocolVersion());
System.out.println();
if (!response.headers().isEmpty()) {
for (CharSequence name : response.headers().names()) {
for (CharSequence value : response.headers().getAll(name)) {
System.out.println("HEADER: " + name + " = " + value);
}
}
System.out.println();
}
if (HttpUtil.isTransferEncodingChunked(response)) {
System.out.println("CHUNKED CONTENT {");
} else {
System.out.println("CONTENT {");
}
}
if (msg instanceof HttpContent) {
HttpContent content = (HttpContent) msg;
System.out.print(content.content().toString(CharsetUtil.UTF_8));
System.out.flush();
if (content instanceof LastHttpContent) {
System.out.println("} END OF CONTENT");
queue.add(ctx.channel().newSucceededFuture());
}
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
queue.add(ctx.channel().newFailedFuture(cause));
cause.printStackTrace();
ctx.close();
}
public BlockingQueue<ChannelFuture> queue() {
return queue;
}
}
|
HttpResponseClientHandler
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client-jaxrs/deployment/src/main/java/io/quarkus/jaxrs/client/reactive/deployment/JaxrsClientReactiveEnricher.java
|
{
"start": 502,
"end": 993
}
|
interface ____ {
/**
* Class-level alterations
*
* Used by MicroProfile Rest Client implementation (quarkus-rest-client-reactive) to support
* {@link jakarta.ws.rs.ext.Provider}, {@code @ClientHeadersFactory}, etc
*
* Please note that this won't be invoked for sub-resources
*
* @param ctor jaxrs client constructor
* @param globalTarget WebTarget field of the jaxrs client
* @param interfaceClass JAXRS-annotated
|
JaxrsClientReactiveEnricher
|
java
|
apache__dubbo
|
dubbo-metrics/dubbo-tracing/src/main/java/org/apache/dubbo/tracing/handler/DubboClientTracingObservationHandler.java
|
{
"start": 1050,
"end": 1495
}
|
class ____<T extends DubboClientContext> implements ObservationHandler<T> {
private final Tracer tracer;
public DubboClientTracingObservationHandler(Tracer tracer) {
this.tracer = tracer;
}
@Override
public void onScopeOpened(T context) {}
@Override
public boolean supportsContext(Observation.Context context) {
return context instanceof DubboClientContext;
}
}
|
DubboClientTracingObservationHandler
|
java
|
grpc__grpc-java
|
util/src/test/java/io/grpc/util/OutlierDetectionLoadBalancerTest.java
|
{
"start": 3625,
"end": 55565
}
|
class ____ {
@Parameterized.Parameters(name = "{0}")
public static Iterable<Object[]> data() {
// Before and after dual stack
return Arrays.asList(new Object[][] {
{true}, {false}
});
}
@Parameterized.Parameter
public boolean hasHealthConsumer;
@Rule
public final MockitoRule mockitoRule = MockitoJUnit.rule();
@Mock
private LoadBalancer mockChildLb;
@Mock
private Helper mockHelper;
@Mock
private SocketAddress mockSocketAddress;
@Mock
private ClientStreamTracer.Factory mockStreamTracerFactory;
@Mock
private ClientStreamTracer mockStreamTracer;
@Captor
private ArgumentCaptor<ConnectivityState> connectivityStateCaptor;
@Captor
private ArgumentCaptor<SubchannelPicker> errorPickerCaptor;
@Captor
private ArgumentCaptor<SubchannelPicker> pickerCaptor;
@Captor
private ArgumentCaptor<ConnectivityState> stateCaptor;
private FakeLoadBalancer fakeChildLb;
private final LoadBalancerProvider mockChildLbProvider = new StandardLoadBalancerProvider(
"foo_policy") {
@Override
public LoadBalancer newLoadBalancer(Helper helper) {
return mockChildLb;
}
};
private final LoadBalancerProvider fakeLbProvider = new StandardLoadBalancerProvider(
"fake_policy") {
@Override
public LoadBalancer newLoadBalancer(Helper helper) {
if (fakeChildLb == null) {
fakeChildLb = new FakeLoadBalancer(helper);
}
return fakeChildLb;
}
};
private final LoadBalancerProvider roundRobinLbProvider = new StandardLoadBalancerProvider(
"round_robin") {
@Override
public LoadBalancer newLoadBalancer(Helper helper) {
return new RoundRobinLoadBalancer(helper);
}
};
private final FakeClock fakeClock = new FakeClock();
private final SynchronizationContext syncContext = new SynchronizationContext(
new Thread.UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
throw new AssertionError(e);
}
});
private OutlierDetectionLoadBalancer loadBalancer;
private final List<EquivalentAddressGroup> servers = Lists.newArrayList();
private final Map<List<EquivalentAddressGroup>, Subchannel> subchannels = Maps.newLinkedHashMap();
private final Map<Subchannel, SubchannelStateListener> subchannelStateListeners
= Maps.newLinkedHashMap();
private final Map<EquivalentAddressGroup, SubchannelStateListener> healthListeners =
Maps.newLinkedHashMap();
private Subchannel subchannel1;
private Subchannel subchannel2;
private Subchannel subchannel3;
private Subchannel subchannel4;
private Subchannel subchannel5;
@Before
public void setUp() {
for (int i = 0; i < 5; i++) {
SocketAddress addr = new FakeSocketAddress("server" + i);
EquivalentAddressGroup eag = new EquivalentAddressGroup(addr);
servers.add(eag);
Subchannel sc = mock(Subchannel.class);
subchannels.put(Arrays.asList(eag), sc);
when(sc.getInternalSubchannel()).thenReturn(sc);
if (hasHealthConsumer) {
healthListeners.put(eag, mock(SubchannelStateListener.class));
}
}
Iterator<Subchannel> subchannelIterator = subchannels.values().iterator();
subchannel1 = subchannelIterator.next();
subchannel2 = subchannelIterator.next();
subchannel3 = subchannelIterator.next();
subchannel4 = subchannelIterator.next();
subchannel5 = subchannelIterator.next();
ChannelLogger channelLogger = mock(ChannelLogger.class);
when(mockHelper.getChannelLogger()).thenReturn(channelLogger);
when(mockHelper.getSynchronizationContext()).thenReturn(syncContext);
when(mockHelper.getScheduledExecutorService()).thenReturn(
fakeClock.getScheduledExecutorService());
when(mockHelper.createSubchannel(any(CreateSubchannelArgs.class))).then(
new Answer<Subchannel>() {
@Override
public Subchannel answer(InvocationOnMock invocation) throws Throwable {
CreateSubchannelArgs args = (CreateSubchannelArgs) invocation.getArguments()[0];
final Subchannel subchannel = subchannels.get(args.getAddresses());
when(subchannel.getChannelLogger()).thenReturn(channelLogger);
when(subchannel.getAllAddresses()).thenReturn(args.getAddresses());
when(subchannel.getAttributes()).thenReturn(args.getAttributes());
doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
subchannelStateListeners.put(subchannel,
(SubchannelStateListener) invocation.getArguments()[0]);
return null;
}
}).when(subchannel).start(any(SubchannelStateListener.class));
return subchannel;
}
});
when(mockStreamTracerFactory.newClientStreamTracer(any(),
any())).thenReturn(mockStreamTracer);
loadBalancer = new OutlierDetectionLoadBalancer(mockHelper, fakeClock.getTicker());
}
@Test
public void handleNameResolutionError_noChildLb() {
loadBalancer.handleNameResolutionError(Status.DEADLINE_EXCEEDED);
verify(mockHelper).updateBalancingState(connectivityStateCaptor.capture(),
errorPickerCaptor.capture());
assertThat(connectivityStateCaptor.getValue()).isEqualTo(ConnectivityState.TRANSIENT_FAILURE);
}
@Test
public void handleNameResolutionError_withChildLb() {
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(
new OutlierDetectionLoadBalancerConfig.Builder()
.setSuccessRateEjection(new SuccessRateEjection.Builder().build())
.setChildConfig(newChildConfig(mockChildLbProvider, null)).build(),
new EquivalentAddressGroup(mockSocketAddress)));
loadBalancer.handleNameResolutionError(Status.DEADLINE_EXCEEDED);
verify(mockChildLb).handleNameResolutionError(Status.DEADLINE_EXCEEDED);
}
/**
* {@code shutdown()} is simply delegated.
*/
@Test
public void shutdown() {
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(
new OutlierDetectionLoadBalancerConfig.Builder()
.setSuccessRateEjection(new SuccessRateEjection.Builder().build())
.setChildConfig(newChildConfig(mockChildLbProvider, null)).build(),
new EquivalentAddressGroup(mockSocketAddress)));
loadBalancer.shutdown();
verify(mockChildLb).shutdown();
}
/**
* Base case for accepting new resolved addresses.
*/
@Test
public void acceptResolvedAddresses() {
Object childConfig = "theConfig";
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setSuccessRateEjection(new SuccessRateEjection.Builder().build())
.setChildConfig(newChildConfig(mockChildLbProvider, childConfig)).build();
ResolvedAddresses resolvedAddresses = buildResolvedAddress(config,
new EquivalentAddressGroup(mockSocketAddress));
loadBalancer.acceptResolvedAddresses(resolvedAddresses);
// Handling of resolved addresses is delegated
verify(mockChildLb).acceptResolvedAddresses(
resolvedAddresses.toBuilder().setLoadBalancingPolicyConfig(childConfig).build());
// There is a single pending task to run the outlier detection algorithm
assertThat(fakeClock.getPendingTasks()).hasSize(1);
// The task is scheduled to run after a delay set in the config.
ScheduledTask task = fakeClock.getPendingTasks().iterator().next();
assertThat(task.getDelay(TimeUnit.NANOSECONDS)).isEqualTo(config.intervalNanos);
}
/**
* The child LB might recreate subchannels leaving the ones we are tracking
* orphaned in the address tracker. Make sure subchannels that are shut down get
* removed from the tracker.
*/
@Test
public void childLbRecreatesSubchannels() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setSuccessRateEjection(new SuccessRateEjection.Builder().build())
.setChildConfig(newChildConfig(fakeLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers.get(0)));
assertThat(loadBalancer.endpointTrackerMap).hasSize(1);
EndpointTracker addressTracker =
(EndpointTracker) loadBalancer.endpointTrackerMap.values().toArray()[0];
assertThat(addressTracker).isNotNull();
OutlierDetectionSubchannel trackedSubchannel
= (OutlierDetectionSubchannel) addressTracker.getSubchannels().toArray()[0];
fakeChildLb.recreateSubchannels();
assertThat(addressTracker.getSubchannels()).doesNotContain(trackedSubchannel);
}
/**
* Outlier detection first enabled, then removed.
*/
@Test
public void acceptResolvedAddresses_outlierDetectionDisabled() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setSuccessRateEjection(new SuccessRateEjection.Builder().build())
.setChildConfig(newChildConfig(mockChildLbProvider, null)).build();
ResolvedAddresses resolvedAddresses = buildResolvedAddress(config,
new EquivalentAddressGroup(mockSocketAddress));
loadBalancer.acceptResolvedAddresses(resolvedAddresses);
fakeClock.forwardTime(15, TimeUnit.SECONDS);
// There is a single pending task to run the outlier detection algorithm
assertThat(fakeClock.getPendingTasks()).hasSize(1);
config = new OutlierDetectionLoadBalancerConfig.Builder().setChildConfig(
newChildConfig(mockChildLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(
buildResolvedAddress(config, new EquivalentAddressGroup(mockSocketAddress)));
// Pending task should be gone since OD is disabled.
assertThat(fakeClock.getPendingTasks()).isEmpty();
}
/**
* Tests different scenarios when the timer interval in the config changes.
*/
@Test
public void acceptResolvedAddresses_intervalUpdate() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setSuccessRateEjection(new SuccessRateEjection.Builder().build())
.setChildConfig(newChildConfig(mockChildLbProvider, null)).build();
ResolvedAddresses resolvedAddresses = buildResolvedAddress(config,
new EquivalentAddressGroup(mockSocketAddress));
loadBalancer.acceptResolvedAddresses(resolvedAddresses);
// Config update has doubled the interval
config = new OutlierDetectionLoadBalancerConfig.Builder()
.setIntervalNanos(config.intervalNanos * 2)
.setSuccessRateEjection(new SuccessRateEjection.Builder().build())
.setChildConfig(newChildConfig(mockChildLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(
buildResolvedAddress(config, new EquivalentAddressGroup(mockSocketAddress)));
// If the timer has not run yet the task is just rescheduled to run after the new delay.
assertThat(fakeClock.getPendingTasks()).hasSize(1);
ScheduledTask task = fakeClock.getPendingTasks().iterator().next();
assertThat(task.getDelay(TimeUnit.NANOSECONDS)).isEqualTo(config.intervalNanos);
assertThat(task.dueTimeNanos).isEqualTo(config.intervalNanos);
// The new interval time has passed. The next task due time should have been pushed back another
// interval.
forwardTime(config);
assertThat(fakeClock.getPendingTasks()).hasSize(1);
task = fakeClock.getPendingTasks().iterator().next();
assertThat(task.dueTimeNanos).isEqualTo(config.intervalNanos + config.intervalNanos + 1);
// Some time passes and a second update comes down, but now the timer has had a chance to run,
// the new delay to timer start should consider when the timer last ran and if the interval is
// not changing in the config, the next task due time should remain unchanged.
fakeClock.forwardTime(4, TimeUnit.SECONDS);
task = fakeClock.getPendingTasks().iterator().next();
loadBalancer.acceptResolvedAddresses(
buildResolvedAddress(config, new EquivalentAddressGroup(mockSocketAddress)));
assertThat(task.dueTimeNanos).isEqualTo(config.intervalNanos + config.intervalNanos + 1);
}
/**
* Confirm basic picking works by delegating to round_robin.
*/
@Test
public void delegatePick() throws Exception {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setSuccessRateEjection(new SuccessRateEjection.Builder().build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers.get(0)));
// Make one of the subchannels READY.
final Subchannel readySubchannel = subchannels.values().iterator().next();
deliverSubchannelState(readySubchannel, ConnectivityStateInfo.forNonError(READY));
verify(mockHelper, times(3)).updateBalancingState(stateCaptor.capture(),
pickerCaptor.capture());
// Make sure that we can pick the single READY subchannel.
SubchannelPicker picker = pickerCaptor.getAllValues().get(2);
PickResult pickResult = picker.pickSubchannel(mock(PickSubchannelArgs.class));
Subchannel s = ((OutlierDetectionSubchannel) pickResult.getSubchannel()).delegate();
if (s instanceof HealthProducerHelper.HealthProducerSubchannel) {
s = ((HealthProducerHelper.HealthProducerSubchannel) s).delegate();
}
assertThat(s).isEqualTo(readySubchannel);
}
/**
* Any ClientStreamTracer.Factory set by the delegate picker should still get used.
*/
@Test
public void delegatePickTracerFactoryPreserved() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setSuccessRateEjection(new SuccessRateEjection.Builder().build())
.setChildConfig(newChildConfig(fakeLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers.get(0)));
// Make one of the subchannels READY.
final Subchannel readySubchannel = subchannels.values().iterator().next();
deliverSubchannelState(readySubchannel, ConnectivityStateInfo.forNonError(READY));
verify(mockHelper, times(2)).updateBalancingState(stateCaptor.capture(),
pickerCaptor.capture());
if (hasHealthConsumer) {
SubchannelStateListener healthListener = healthListeners.get(servers.get(0));
verify(healthListener).onSubchannelState(ConnectivityStateInfo.forNonError(READY));
}
// Make sure that we can pick the single READY subchannel.
SubchannelPicker picker = pickerCaptor.getAllValues().get(1);
PickResult pickResult = picker.pickSubchannel(mock(PickSubchannelArgs.class));
// Calls to a stream tracer created with the factory in the result should make it to a stream
// tracer the underlying LB/picker is using.
ClientStreamTracer clientStreamTracer = pickResult.getStreamTracerFactory()
.newClientStreamTracer(ClientStreamTracer.StreamInfo.newBuilder().build(), new Metadata());
clientStreamTracer.inboundHeaders();
// The underlying fake LB provider is configured with a factory that returns a mock stream
// tracer.
verify(mockStreamTracer).inboundHeaders();
}
/**
* Assure the tracer works even when the underlying LB does not have a tracer to delegate to.
*/
@Test
public void delegatePickTracerFactoryNotSet() throws Exception {
// We set the mock factory to null to indicate that the delegate does not have its own tracer.
mockStreamTracerFactory = null;
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setSuccessRateEjection(new SuccessRateEjection.Builder().build())
.setChildConfig(newChildConfig(fakeLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers.get(0)));
// Make one of the subchannels READY.
final Subchannel readySubchannel = subchannels.values().iterator().next();
deliverSubchannelState(readySubchannel, ConnectivityStateInfo.forNonError(READY));
verify(mockHelper, times(2)).updateBalancingState(stateCaptor.capture(),
pickerCaptor.capture());
// Make sure that we can pick the single READY subchannel.
SubchannelPicker picker = pickerCaptor.getAllValues().get(1);
PickResult pickResult = picker.pickSubchannel(mock(PickSubchannelArgs.class));
// With no delegate tracers factory a call to the OD tracer should still work
ClientStreamTracer clientStreamTracer = pickResult.getStreamTracerFactory()
.newClientStreamTracer(ClientStreamTracer.StreamInfo.newBuilder().build(), new Metadata());
clientStreamTracer.inboundHeaders();
// Sanity check to make sure the delegate tracer does not get called.
verifyNoInteractions(mockStreamTracer);
}
/**
* The success rate algorithm leaves a healthy set of addresses alone.
*/
@Test
public void successRateNoOutliers() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder().setMinimumHosts(3).setRequestVolume(10).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// No outliers, no ejections.
assertEjectedSubchannels(ImmutableSet.of());
}
/**
* The success rate algorithm ejects the outlier.
*/
@Test
public void successRateOneOutlier() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// The one subchannel that was returning errors should be ejected.
assertEjectedSubchannels(ImmutableSet.of(ImmutableSet.copyOf(servers.get(0).getAddresses())));
}
/**
* The success rate algorithm ejects the outlier, but then the config changes so that similar
* behavior no longer gets ejected.
*/
@Test
public void successRateOneOutlier_configChange() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// The one subchannel that was returning errors should be ejected.
assertEjectedSubchannels(ImmutableSet.of(ImmutableSet.copyOf(servers.get(0).getAddresses())));
// New config sets enforcement percentage to 0.
config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10)
.setEnforcementPercentage(0).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
// The PickFirstLeafLB has an extra level of indirection because of health
int expectedStateChanges = PickFirstLoadBalancerProvider.isEnabledNewPickFirst() ? 8 : 12;
generateLoad(ImmutableMap.of(subchannel2, Status.DEADLINE_EXCEEDED), expectedStateChanges);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// Since we brought enforcement percentage to 0, no additional ejection should have happened.
assertEjectedSubchannels(ImmutableSet.of(ImmutableSet.copyOf(servers.get(0).getAddresses())));
}
/**
* The success rate algorithm ejects the outlier but after some time it should get unejected
* if it stops being an outlier..
*/
@Test
public void successRateOneOutlier_unejected() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 7);
// Move forward in time to a point where the detection timer has fired.
fakeClock.forwardTime(config.intervalNanos + 1, TimeUnit.NANOSECONDS);
// The one subchannel that was returning errors should be ejected.
assertEjectedSubchannels(ImmutableSet.of(ImmutableSet.copyOf(servers.get(0).getAddresses())));
// Now we produce more load, but the subchannel has started working and is no longer an outlier.
int expectedStateChanges = PickFirstLoadBalancerProvider.isEnabledNewPickFirst() ? 8 : 12;
generateLoad(ImmutableMap.of(), expectedStateChanges);
// Move forward in time to a point where the detection timer has fired.
fakeClock.forwardTime(config.maxEjectionTimeNanos + 1, TimeUnit.NANOSECONDS);
// No subchannels should remain ejected.
assertEjectedSubchannels(ImmutableSet.of());
}
/**
* The success rate algorithm ignores addresses without enough volume.
*/
@Test
public void successRateOneOutlier_notEnoughVolume() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(20).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
// We produce an outlier, but don't give it enough calls to reach the minimum volume.
generateLoad(
ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED),
ImmutableMap.of(subchannel1, 19), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// The address should not have been ejected.
assertEjectedSubchannels(ImmutableSet.of());
}
/**
* The success rate algorithm does not apply if we don't have enough addresses that have the
* required volume.
*/
@Test
public void successRateOneOutlier_notEnoughAddressesWithVolume() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(5)
.setRequestVolume(20).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(
ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED),
// subchannel2 has only 19 calls which results in success rate not triggering.
ImmutableMap.of(subchannel2, 19),
7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// No subchannels should have been ejected.
assertEjectedSubchannels(ImmutableSet.of());
}
/**
* The enforcementPercentage configuration should be honored.
*/
@Test
public void successRateOneOutlier_enforcementPercentage() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10)
.setEnforcementPercentage(0)
.build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// There is one outlier, but because enforcementPercentage is 0, nothing should be ejected.
assertEjectedSubchannels(ImmutableSet.of());
}
/**
* Two outliers get ejected.
*/
@Test
public void successRateTwoOutliers() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10)
.setStdevFactor(1).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(
subchannel1, Status.DEADLINE_EXCEEDED,
subchannel2, Status.DEADLINE_EXCEEDED), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// The one subchannel that was returning errors should be ejected.
assertEjectedSubchannels(ImmutableSet.of(ImmutableSet.of(servers.get(0).getAddresses().get(0)),
ImmutableSet.of(servers.get(1).getAddresses().get(0))));
}
/**
* Three outliers, second one ejected even if ejecting it goes above the max ejection percentage,
* as this matches Envoy behavior. The third one should not get ejected.
*/
@Test
public void successRateThreeOutliers_maxEjectionPercentage() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(30)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10)
.setStdevFactor(1).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(
subchannel1, Status.DEADLINE_EXCEEDED,
subchannel2, Status.DEADLINE_EXCEEDED,
subchannel3, Status.DEADLINE_EXCEEDED), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
int totalEjected = 0;
for (EquivalentAddressGroup addressGroup: servers) {
totalEjected +=
loadBalancer.endpointTrackerMap.get(
ImmutableSet.of(addressGroup.getAddresses().get(0))).subchannelsEjected() ? 1 : 0;
}
assertThat(totalEjected).isEqualTo(2);
}
/**
* The success rate algorithm leaves a healthy set of addresses alone.
*/
@Test
public void failurePercentageNoOutliers() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
// By default all calls will return OK.
generateLoad(ImmutableMap.of(), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// No outliers, no ejections.
assertEjectedSubchannels(ImmutableSet.of());
}
/**
* The success rate algorithm ejects the outlier.
*/
@Test
public void failurePercentageOneOutlier() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// The one subchannel that was returning errors should be ejected.
assertEjectedSubchannels(ImmutableSet.of(ImmutableSet.copyOf(servers.get(0).getAddresses())));
}
/**
* The failure percentage algorithm ignores addresses without enough volume..
*/
@Test
public void failurePercentageOneOutlier_notEnoughVolume() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(100).build()) // We won't produce this much volume...
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// We should see no ejections.
assertEjectedSubchannels(ImmutableSet.of());
}
/**
* The failure percentage algorithm does not apply if we don't have enough addresses that have the
* required volume.
*/
@Test
public void failurePercentageOneOutlier_notEnoughAddressesWithVolume() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(5)
.setRequestVolume(20).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(
ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED),
// subchannel2 has only 19 calls which results in failure percentage not triggering.
ImmutableMap.of(subchannel2, 19),
7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// No subchannels should have been ejected.
assertEjectedSubchannels(ImmutableSet.of());
}
/**
* The enforcementPercentage configuration should be honored.
*/
@Test
public void failurePercentageOneOutlier_enforcementPercentage() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10)
.setEnforcementPercentage(0)
.build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// There is one outlier, but because enforcementPercentage is 0, nothing should be ejected.
assertEjectedSubchannels(ImmutableSet.of());
}
/** Success rate detects two outliers and error percentage three. */
@Test
public void successRateAndFailurePercentageThreeOutliers() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(100)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10)
.setStdevFactor(1).build())
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setThreshold(0)
.setMinimumHosts(3)
.setRequestVolume(1)
.build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
// Three subchannels with problems, but one only has a single call that failed.
// This is not enough for success rate to catch, but failure percentage is
// configured with a 0 tolerance threshold.
generateLoad(
ImmutableMap.of(
subchannel1, Status.DEADLINE_EXCEEDED,
subchannel2, Status.DEADLINE_EXCEEDED,
subchannel3, Status.DEADLINE_EXCEEDED),
ImmutableMap.of(subchannel3, 1), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// Should see thee ejected, success rate cathes the first two, error percentage the
// same two plus the subchannel with the single failure.
assertEjectedSubchannels(ImmutableSet.of(
ImmutableSet.of(servers.get(0).getAddresses().get(0)),
ImmutableSet.of(servers.get(1).getAddresses().get(0)),
ImmutableSet.of(servers.get(2).getAddresses().get(0))));
}
/**
* When the address a subchannel is associated with changes it should get tracked under the new
* address and its ejection state should match what the address has.
*/
@Test
public void subchannelUpdateAddress_singleReplaced() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
EquivalentAddressGroup oldAddressGroup = servers.get(0);
EndpointTracker oldAddressTracker = loadBalancer.endpointTrackerMap.get(
ImmutableSet.of(oldAddressGroup.getAddresses().get(0)));
EquivalentAddressGroup newAddressGroup = servers.get(1);
EndpointTracker newAddressTracker = loadBalancer.endpointTrackerMap.get(
ImmutableSet.of(newAddressGroup.getAddresses().get(0)));
// The one subchannel that was returning errors should be ejected.
assertEjectedSubchannels(ImmutableSet.of(
ImmutableSet.of(oldAddressGroup.getAddresses().get(0))));
// The ejected subchannel gets updated with another address in the map that is not ejected
OutlierDetectionSubchannel subchannel = oldAddressTracker.getSubchannels()
.iterator().next();
subchannel.updateAddresses(ImmutableList.of(newAddressGroup));
// The replaced address should no longer have the subchannel associated with it.
assertThat(oldAddressTracker.getSubchannels()).doesNotContain(subchannel);
// The new address should instead have the subchannel.
assertThat(newAddressTracker.getSubchannels()).contains(subchannel);
// Since the new address is not ejected, the ejected subchannel moving over to it should also
// become unejected.
assertThat(subchannel.isEjected()).isFalse();
}
@Test
public void multipleAddressesEndpoint() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setChildConfig(newChildConfig(fakeLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
EquivalentAddressGroup manyAddEndpoint = new EquivalentAddressGroup(Arrays.asList(
servers.get(0).getAddresses().get(0), servers.get(1).getAddresses().get(0)));
List<EquivalentAddressGroup> manyAddEndpointServer = ImmutableList.of(manyAddEndpoint);
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, manyAddEndpointServer));
assertThat(loadBalancer.endpointTrackerMap.size()).isEqualTo(1);
assertThat(loadBalancer.addressMap.size()).isEqualTo(2);
manyAddEndpoint = new EquivalentAddressGroup(Arrays.asList(
servers.get(0).getAddresses().get(0), servers.get(1).getAddresses().get(0)));
EquivalentAddressGroup manyAddEndpoint2 = new EquivalentAddressGroup(Arrays.asList(
servers.get(2).getAddresses().get(0), servers.get(3).getAddresses().get(0)));
EquivalentAddressGroup singleAddressEndpoint = new EquivalentAddressGroup(Arrays.asList(
servers.get(4).getAddresses().get(0)));
manyAddEndpointServer = ImmutableList.of(
manyAddEndpoint, manyAddEndpoint2, singleAddressEndpoint);
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, manyAddEndpointServer));
assertThat(loadBalancer.endpointTrackerMap.size()).isEqualTo(3);
assertThat(loadBalancer.addressMap.size()).isEqualTo(5);
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED,
subchannel2, Status.DEADLINE_EXCEEDED), 13);
forwardTime(config);
// eject the first endpoint: (address0, address1)
assertEjectedSubchannels(ImmutableSet.of(ImmutableSet.of(
servers.get(0).getAddresses().get(0), servers.get(1).getAddresses().get(0))));
}
/**
* If a single address gets replaced by multiple, the subchannel becomes uneligible for outlier
* detection.
*/
@Test
public void subchannelUpdateAddress_singleReplacedWithMultiple() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
EquivalentAddressGroup oldAddressGroup = servers.get(0);
EndpointTracker oldAddressTracker = loadBalancer.endpointTrackerMap.get(
ImmutableSet.of(oldAddressGroup.getAddresses().get(0)));
EquivalentAddressGroup newAddress1 = servers.get(1);
EquivalentAddressGroup newAddress2 = servers.get(2);
OutlierDetectionSubchannel subchannel = oldAddressTracker.getSubchannels()
.iterator().next();
// The subchannel gets updated with two new addresses
ImmutableList<EquivalentAddressGroup> addressUpdate
= ImmutableList.of(newAddress1, newAddress2);
subchannel.updateAddresses(addressUpdate);
when(subchannel1.getAllAddresses()).thenReturn(addressUpdate);
// The replaced address should no longer be tracked.
assertThat(oldAddressTracker.getSubchannels()).doesNotContain(subchannel);
// The old tracker should also have its call counters cleared.
assertThat(oldAddressTracker.activeVolume()).isEqualTo(0);
assertThat(oldAddressTracker.inactiveVolume()).isEqualTo(0);
}
/**
* A subchannel with multiple addresses will again become eligible for outlier detection if it
* receives an update with a single address.
*/
@Test
public void subchannelUpdateAddress_multipleReplacedWithSingle() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setChildConfig(newChildConfig(fakeLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 6);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
EquivalentAddressGroup oldAddressGroup = servers.get(0);
EndpointTracker oldAddressTracker = loadBalancer.endpointTrackerMap.get(
ImmutableSet.of(oldAddressGroup.getAddresses().get(0)));
EquivalentAddressGroup newAddressGroup1 = servers.get(1);
EndpointTracker newAddressTracker1 = loadBalancer.endpointTrackerMap.get(
ImmutableSet.of(newAddressGroup1.getAddresses().get(0)));
EquivalentAddressGroup newAddressGroup2 = servers.get(2);
// The old subchannel was returning errors and should be ejected.
assertEjectedSubchannels(ImmutableSet.of(
ImmutableSet.of(oldAddressGroup.getAddresses().get(0))));
OutlierDetectionSubchannel subchannel = oldAddressTracker.getSubchannels()
.iterator().next();
// The subchannel gets updated with two new addresses
ImmutableList<EquivalentAddressGroup> addressUpdate
= ImmutableList.of(newAddressGroup1, newAddressGroup2);
subchannel.updateAddresses(addressUpdate);
when(subchannel1.getAllAddresses()).thenReturn(addressUpdate);
// The replaced address should no longer be tracked.
assertThat(oldAddressTracker.getSubchannels()).doesNotContain(subchannel);
// The old tracker should also have its call counters cleared.
assertThat(oldAddressTracker.activeVolume()).isEqualTo(0);
assertThat(oldAddressTracker.inactiveVolume()).isEqualTo(0);
// Another update takes the subchannel back to a single address.
addressUpdate = ImmutableList.of(newAddressGroup1);
subchannel.updateAddresses(addressUpdate);
when(subchannel1.getAllAddresses()).thenReturn(addressUpdate);
// The subchannel is now associated with the single new address.
assertThat(newAddressTracker1.getSubchannels()).contains(subchannel);
// The previously ejected subchannel should become unejected as it is now associated with an
// unejected address.
assertThat(subchannel.isEjected()).isFalse();
}
/** Both algorithms configured, but no outliers. */
@Test
public void successRateAndFailurePercentage_noOutliers() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// No outliers, no ejections.
assertEjectedSubchannels(ImmutableSet.of());
}
/** Both algorithms configured, success rate detects an outlier. */
@Test
public void successRateAndFailurePercentage_successRateOutlier() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10)
.setEnforcementPercentage(0).build()) // Configured, but not enforcing.
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// The one subchannel that was returning errors should be ejected.
assertEjectedSubchannels(ImmutableSet.of(ImmutableSet.copyOf(servers.get(0).getAddresses())));
for (SubchannelStateListener healthListener : healthListeners.values()) {
verifyNoInteractions(healthListener);
}
}
@Test
public void successRateAndFailurePercentage_successRateOutlier_() { // with health listener
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build())
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10)
.setEnforcementPercentage(0).build()) // Configured, but not enforcing.
.setChildConfig(newChildConfig(fakeLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 6);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// The one subchannel that was returning errors should be ejected.
assertEjectedSubchannels(ImmutableSet.of(ImmutableSet.copyOf(servers.get(0).getAddresses())));
if (hasHealthConsumer) {
ArgumentCaptor<ConnectivityStateInfo> csiCaptor = ArgumentCaptor.forClass(
ConnectivityStateInfo.class);
verify(healthListeners.get(servers.get(0)), times(2)).onSubchannelState(csiCaptor.capture());
List<ConnectivityStateInfo> connectivityStateInfos = csiCaptor.getAllValues();
// The subchannel went through two state transitions...
assertThat(connectivityStateInfos).hasSize(2);
// ...it first went to the READY state...
assertThat(connectivityStateInfos.get(0).getState()).isEqualTo(READY);
// ...and then to TRANSIENT_FAILURE as outlier detection ejected it.
assertThat(connectivityStateInfos.get(1).getState()).isEqualTo(TRANSIENT_FAILURE);
assertThat(connectivityStateInfos.get(1).getStatus().getCode()).isEqualTo(Code.UNAVAILABLE);
assertThat(connectivityStateInfos.get(1).getStatus().getDescription()).isEqualTo(
"The subchannel has been ejected by outlier detection");
}
}
/** Both algorithms configured, error percentage detects an outlier. */
@Test
public void successRateAndFailurePercentage_errorPercentageOutlier() {
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10)
.setEnforcementPercentage(0).build())
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build()) // Configured, but not enforcing.
.setChildConfig(newChildConfig(roundRobinLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 7);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// The one subchannel that was returning errors should be ejected.
assertEjectedSubchannels(ImmutableSet.of(ImmutableSet.copyOf(servers.get(0).getAddresses())));
for (SubchannelStateListener healthListener : healthListeners.values()) {
verifyNoInteractions(healthListener);
}
}
@Test
public void successRateAndFailurePercentage_errorPercentageOutlier_() { // with health listener
OutlierDetectionLoadBalancerConfig config = new OutlierDetectionLoadBalancerConfig.Builder()
.setMaxEjectionPercent(50)
.setSuccessRateEjection(
new SuccessRateEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10)
.setEnforcementPercentage(0).build())
.setFailurePercentageEjection(
new FailurePercentageEjection.Builder()
.setMinimumHosts(3)
.setRequestVolume(10).build()) // Configured, but not enforcing.
.setChildConfig(newChildConfig(fakeLbProvider, null)).build();
loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers));
generateLoad(ImmutableMap.of(subchannel1, Status.DEADLINE_EXCEEDED), 6);
// Move forward in time to a point where the detection timer has fired.
forwardTime(config);
// The one subchannel that was returning errors should be ejected.
assertEjectedSubchannels(ImmutableSet.of(ImmutableSet.copyOf(servers.get(0).getAddresses())));
if (hasHealthConsumer) {
ArgumentCaptor<ConnectivityStateInfo> csiCaptor = ArgumentCaptor.forClass(
ConnectivityStateInfo.class);
verify(healthListeners.get(servers.get(0)), times(2)).onSubchannelState(csiCaptor.capture());
List<ConnectivityStateInfo> connectivityStateInfos = csiCaptor.getAllValues();
// The subchannel went through two state transitions...
assertThat(connectivityStateInfos).hasSize(2);
// ...it first went to the READY state...
assertThat(connectivityStateInfos.get(0).getState()).isEqualTo(READY);
// ...and then to TRANSIENT_FAILURE as outlier detection ejected it.
assertThat(connectivityStateInfos.get(1).getState()).isEqualTo(TRANSIENT_FAILURE);
assertThat(connectivityStateInfos.get(1).getStatus().getCode()).isEqualTo(Code.UNAVAILABLE);
assertThat(connectivityStateInfos.get(1).getStatus().getDescription()).isEqualTo(
"The subchannel has been ejected by outlier detection");
}
}
@Test
public void mathChecksOut() {
ImmutableList<Double> values = ImmutableList.of(600d, 470d, 170d, 430d, 300d);
double mean = SuccessRateOutlierEjectionAlgorithm.mean(values);
double stdev = SuccessRateOutlierEjectionAlgorithm.standardDeviation(values, mean);
assertThat(mean).isEqualTo(394);
assertThat(stdev).isEqualTo(147.32277488562318);
}
private static
|
OutlierDetectionLoadBalancerTest
|
java
|
grpc__grpc-java
|
services/src/main/java/io/grpc/protobuf/services/BinaryLogSink.java
|
{
"start": 862,
"end": 1007
}
|
interface ____ extends Closeable {
/**
* Writes the {@code message} to the destination.
*/
void write(MessageLite message);
}
|
BinaryLogSink
|
java
|
google__guava
|
android/guava-testlib/src/com/google/common/collect/testing/google/SortedMapGenerators.java
|
{
"start": 4755,
"end": 4905
}
|
class ____ static utility methods.
*
* @deprecated Do not instantiate this utility class.
*/
@Deprecated
public SortedMapGenerators() {}
}
|
of
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/jmx/export/metadata/ManagedMetric.java
|
{
"start": 1158,
"end": 3192
}
|
class ____ extends AbstractJmxAttribute {
private @Nullable String category;
private @Nullable String displayName;
private MetricType metricType = MetricType.GAUGE;
private int persistPeriod = -1;
private @Nullable String persistPolicy;
private @Nullable String unit;
/**
* The category of this metric (ex. throughput, performance, utilization).
*/
public void setCategory(@Nullable String category) {
this.category = category;
}
/**
* The category of this metric (ex. throughput, performance, utilization).
*/
public @Nullable String getCategory() {
return this.category;
}
/**
* A display name for this metric.
*/
public void setDisplayName(@Nullable String displayName) {
this.displayName = displayName;
}
/**
* A display name for this metric.
*/
public @Nullable String getDisplayName() {
return this.displayName;
}
/**
* A description of how this metric's values change over time.
*/
public void setMetricType(MetricType metricType) {
Assert.notNull(metricType, "MetricType must not be null");
this.metricType = metricType;
}
/**
* A description of how this metric's values change over time.
*/
public MetricType getMetricType() {
return this.metricType;
}
/**
* The persist period for this metric.
*/
public void setPersistPeriod(int persistPeriod) {
this.persistPeriod = persistPeriod;
}
/**
* The persist period for this metric.
*/
public int getPersistPeriod() {
return this.persistPeriod;
}
/**
* The persist policy for this metric.
*/
public void setPersistPolicy(@Nullable String persistPolicy) {
this.persistPolicy = persistPolicy;
}
/**
* The persist policy for this metric.
*/
public @Nullable String getPersistPolicy() {
return this.persistPolicy;
}
/**
* The expected unit of measurement values.
*/
public void setUnit(@Nullable String unit) {
this.unit = unit;
}
/**
* The expected unit of measurement values.
*/
public @Nullable String getUnit() {
return this.unit;
}
}
|
ManagedMetric
|
java
|
alibaba__nacos
|
config/src/main/java/com/alibaba/nacos/config/server/utils/Md5ComparatorDelegate.java
|
{
"start": 1160,
"end": 3119
}
|
class ____ {
private static final Logger LOGGER = LoggerFactory.getLogger(Md5ComparatorDelegate.class);
private static final Md5ComparatorDelegate INSTANCE = new Md5ComparatorDelegate();
private String md5ComparatorType = EnvUtil.getProperty("nacos.config.cache.type", "nacos");
private Md5Comparator md5Comparator;
private Md5ComparatorDelegate() {
Collection<Md5Comparator> md5Comparators = NacosServiceLoader.load(Md5Comparator.class);
for (Md5Comparator each : md5Comparators) {
if (StringUtils.isEmpty(each.getName())) {
LOGGER.warn(
"[Md5ComparatorDelegate] Load Md5Comparator({}) Md5ComparatorName(null/empty) fail. Please add Md5ComparatorName to resolve",
each.getClass().getName());
continue;
}
LOGGER.info("[Md5ComparatorDelegate] Load Md5Comparator({}) Md5ComparatorName({}) successfully.",
each.getClass().getName(), each.getName());
if (StringUtils.equals(md5ComparatorType, each.getName())) {
LOGGER.info("[Md5ComparatorDelegate] Matched Md5Comparator found,set md5Comparator={}",
each.getClass().getName());
md5Comparator = each;
}
}
if (md5Comparator == null) {
LOGGER.info(
"[Md5ComparatorDelegate] Matched Md5Comparator not found, load Default NacosMd5Comparator successfully");
md5Comparator = new NacosMd5Comparator();
}
}
public static Md5ComparatorDelegate getInstance() {
return INSTANCE;
}
public Map<String, ConfigListenState> compareMd5(HttpServletRequest request, HttpServletResponse response,
Map<String, ConfigListenState> clientMd5Map) {
return md5Comparator.compareMd5(request, response, clientMd5Map);
}
}
|
Md5ComparatorDelegate
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/cache/interceptor/CacheEvictOperation.java
|
{
"start": 1444,
"end": 2118
}
|
class ____ extends CacheOperation.Builder {
private boolean cacheWide = false;
private boolean beforeInvocation = false;
public void setCacheWide(boolean cacheWide) {
this.cacheWide = cacheWide;
}
public void setBeforeInvocation(boolean beforeInvocation) {
this.beforeInvocation = beforeInvocation;
}
@Override
protected StringBuilder getOperationDescription() {
StringBuilder sb = super.getOperationDescription();
sb.append(',');
sb.append(this.cacheWide);
sb.append(',');
sb.append(this.beforeInvocation);
return sb;
}
@Override
public CacheEvictOperation build() {
return new CacheEvictOperation(this);
}
}
}
|
Builder
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
|
{
"start": 86257,
"end": 131624
}
|
class ____ implements
RemoteIterator<SnapshotDiffReportListing> {
private final String snapshotDir;
private final String fromSnapshot;
private final String toSnapshot;
private byte[] startPath;
private int index;
private boolean hasNext = true;
private SnapshotDiffReportListingIterator(String snapshotDir,
String fromSnapshot, String toSnapshot) {
this.snapshotDir = snapshotDir;
this.fromSnapshot = fromSnapshot;
this.toSnapshot = toSnapshot;
this.startPath = DFSUtilClient.EMPTY_BYTES;
this.index = -1;
}
@Override
public boolean hasNext() {
return hasNext;
}
@Override
public SnapshotDiffReportListing next() throws IOException {
if (!hasNext) {
throw new java.util.NoSuchElementException(
"No more entry in SnapshotDiffReport for " + snapshotDir);
}
final SnapshotDiffReportListing part =
dfs.getSnapshotDiffReportListing(snapshotDir, fromSnapshot,
toSnapshot, startPath, index);
startPath = part.getLastPath();
index = part.getLastIndex();
hasNext =
!(Arrays.equals(startPath, DFSUtilClient.EMPTY_BYTES) && index == -1);
return part;
}
}
private SnapshotDiffReport getSnapshotDiffReportInternal(
final String snapshotDir, final String fromSnapshot,
final String toSnapshot) throws IOException {
return DFSUtilClient.getSnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot,
dfs::getSnapshotDiffReport, dfs::getSnapshotDiffReportListing);
}
/**
* Get the difference between two snapshots, or between a snapshot and the
* current tree of a directory.
*
* @see DFSClient#getSnapshotDiffReportListing
*/
public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir,
final String fromSnapshot, final String toSnapshot) throws IOException {
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_SNAPSHOT_DIFF);
Path absF = fixRelativePart(snapshotDir);
return new FileSystemLinkResolver<SnapshotDiffReport>() {
@Override
public SnapshotDiffReport doCall(final Path p)
throws IOException {
return getSnapshotDiffReportInternal(getPathName(p), fromSnapshot,
toSnapshot);
}
@Override
public SnapshotDiffReport next(final FileSystem fs, final Path p)
throws IOException {
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem myDfs = (DistributedFileSystem)fs;
myDfs.getSnapshotDiffReport(p, fromSnapshot, toSnapshot);
} else {
throw new UnsupportedOperationException("Cannot perform snapshot"
+ " operations on a symlink to a non-DistributedFileSystem: "
+ snapshotDir + " -> " + p);
}
return null;
}
}.resolve(this, absF);
}
/**
* Get the difference between two snapshots of a directory iteratively.
*
* @param snapshotDir full path of the directory where snapshots are taken.
* @param fromSnapshotName snapshot name of the from point. Null indicates the current tree.
* @param toSnapshotName snapshot name of the to point. Null indicates the current tree.
* @param snapshotDiffStartPath path relative to the snapshottable root directory from where
* the snapshotdiff computation needs to start.
* @param snapshotDiffIndex index in the created or deleted list of the directory at which the
* snapshotdiff computation stopped during the last rpc call. -1 indicates the diff
* computation needs to start right from the start path.
* @return the difference report represented as a {@link SnapshotDiffReportListing}.
* @throws IOException if an I/O error occurred.
*/
public SnapshotDiffReportListing getSnapshotDiffReportListing(Path snapshotDir,
    String fromSnapshotName, String toSnapshotName, String snapshotDiffStartPath,
    int snapshotDiffIndex) throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_SNAPSHOT_DIFF);
  Path absF = fixRelativePart(snapshotDir);
  return new FileSystemLinkResolver<SnapshotDiffReportListing>() {
    @Override
    public SnapshotDiffReportListing doCall(final Path p) throws IOException {
      return dfs.getSnapshotDiffReportListing(getPathName(p), fromSnapshotName, toSnapshotName,
          DFSUtilClient.string2Bytes(snapshotDiffStartPath), snapshotDiffIndex);
    }
    @Override
    public SnapshotDiffReportListing next(final FileSystem fs, final Path p)
        throws IOException {
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem distributedFileSystem = (DistributedFileSystem)fs;
        // BUGFIX: propagate the listing from the resolved filesystem.
        // Previously the call's result was dropped and null was returned,
        // so resolving through a symlink always produced null.
        return distributedFileSystem.getSnapshotDiffReportListing(p, fromSnapshotName,
            toSnapshotName, snapshotDiffStartPath, snapshotDiffIndex);
      } else {
        throw new UnsupportedOperationException("Cannot perform snapshot"
            + " operations on a symlink to a non-DistributedFileSystem: "
            + snapshotDir + " -> " + p);
      }
    }
  }.resolve(this, absF);
}
/**
* Get the close status of a file
* @param src The path to the file
*
* @return return true if file is closed
* @throws FileNotFoundException if the file does not exist.
* @throws IOException If an I/O error occurred
*/
@Override
public boolean isFileClosed(final Path src) throws IOException {
  // Resolve symlinks first; the actual query is delegated to the client.
  final Path absolutePath = fixRelativePart(src);
  return new FileSystemLinkResolver<Boolean>() {
    @Override
    public Boolean doCall(final Path resolved) throws IOException {
      return dfs.isFileClosed(getPathName(resolved));
    }
    @Override
    public Boolean next(final FileSystem linkedFs, final Path resolved)
        throws IOException {
      if (!(linkedFs instanceof DistributedFileSystem)) {
        throw new UnsupportedOperationException("Cannot call isFileClosed"
            + " on a symlink to a non-DistributedFileSystem: "
            + src + " -> " + resolved);
      }
      return ((DistributedFileSystem) linkedFs).isFileClosed(resolved);
    }
  }.resolve(this, absolutePath);
}
/**
* @see #addCacheDirective(CacheDirectiveInfo, EnumSet)
*/
public long addCacheDirective(CacheDirectiveInfo info) throws IOException {
  // Convenience overload: add the directive with no flags set.
  final EnumSet<CacheFlag> noFlags = EnumSet.noneOf(CacheFlag.class);
  return addCacheDirective(info, noFlags);
}
/**
* Add a new CacheDirective.
*
* @param info Information about a directive to add.
* @param flags {@link CacheFlag}s to use for this operation.
* @return the ID of the directive that was created.
* @throws IOException if the directive could not be added
*/
public long addCacheDirective(
    CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.ADD_CACHE_DIRECTIVE);
  Preconditions.checkNotNull(info.getPath());
  // Fully qualify the directive's path before handing it to the client.
  Path qualified = new Path(getPathName(fixRelativePart(info.getPath())))
      .makeQualified(getUri(), getWorkingDirectory());
  CacheDirectiveInfo qualifiedInfo =
      new CacheDirectiveInfo.Builder(info).setPath(qualified).build();
  return dfs.addCacheDirective(qualifiedInfo, flags);
}
/**
* @see #modifyCacheDirective(CacheDirectiveInfo, EnumSet)
*/
public void modifyCacheDirective(CacheDirectiveInfo info) throws IOException {
  // Convenience overload: modify the directive with no flags set.
  final EnumSet<CacheFlag> noFlags = EnumSet.noneOf(CacheFlag.class);
  modifyCacheDirective(info, noFlags);
}
/**
* Modify a CacheDirective.
*
* @param info Information about the directive to modify. You must set the ID
* to indicate which CacheDirective you want to modify.
* @param flags {@link CacheFlag}s to use for this operation.
* @throws IOException if the directive could not be modified
*/
public void modifyCacheDirective(
    CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.MODIFY_CACHE_DIRECTIVE);
  // A path is optional for modify; qualify it only when present.
  if (info.getPath() != null) {
    Path qualified = new Path(getPathName(fixRelativePart(info.getPath())))
        .makeQualified(getUri(), getWorkingDirectory());
    info = new CacheDirectiveInfo.Builder(info).setPath(qualified).build();
  }
  dfs.modifyCacheDirective(info, flags);
}
/**
* Remove a CacheDirectiveInfo.
*
* @param id identifier of the CacheDirectiveInfo to remove
* @throws IOException if the directive could not be removed
*/
public void removeCacheDirective(long id)
    throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.REMOVE_CACHE_DIRECTIVE);
  // Pure delegation; the NameNode validates that the id exists.
  dfs.removeCacheDirective(id);
}
/**
* List cache directives. Incrementally fetches results from the server.
*
* @param filter Filter parameters to use when listing the directives, null to
* list all directives visible to us.
* @return A RemoteIterator which returns CacheDirectiveInfo objects.
*/
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
    CacheDirectiveInfo filter) throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.LIST_CACHE_DIRECTIVE);
  // A null filter means "list everything visible to this user".
  if (filter == null) {
    filter = new CacheDirectiveInfo.Builder().build();
  }
  // Normalize a relative filter path to an absolute one before the RPC.
  if (filter.getPath() != null) {
    filter = new CacheDirectiveInfo.Builder(filter).
        setPath(new Path(getPathName(fixRelativePart(filter.getPath())))).
        build();
  }
  final RemoteIterator<CacheDirectiveEntry> iter =
      dfs.listCacheDirectives(filter);
  // Wrap the client iterator so returned paths carry this filesystem's
  // scheme and authority.
  return new RemoteIterator<CacheDirectiveEntry>() {
    @Override
    public boolean hasNext() throws IOException {
      return iter.hasNext();
    }
    @Override
    public CacheDirectiveEntry next() throws IOException {
      // Although the paths we get back from the NameNode should always be
      // absolute, we call makeQualified to add the scheme and authority of
      // this DistributedFilesystem.
      CacheDirectiveEntry desc = iter.next();
      CacheDirectiveInfo info = desc.getInfo();
      Path p = info.getPath().makeQualified(getUri(), getWorkingDirectory());
      return new CacheDirectiveEntry(
          new CacheDirectiveInfo.Builder(info).setPath(p).build(),
          desc.getStats());
    }
  };
}
/**
* Add a cache pool.
*
* @param info
* The request to add a cache pool.
* @throws IOException
* If the request could not be completed.
*/
public void addCachePool(CachePoolInfo info) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.ADD_CACHE_POOL);
  // Fail fast client-side on an invalid pool description.
  CachePoolInfo.validate(info);
  dfs.addCachePool(info);
}
/**
* Modify an existing cache pool.
*
* @param info
* The request to modify a cache pool.
* @throws IOException
* If the request could not be completed.
*/
public void modifyCachePool(CachePoolInfo info) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.MODIFY_CACHE_POOL);
  // Fail fast client-side on an invalid pool description.
  CachePoolInfo.validate(info);
  dfs.modifyCachePool(info);
}
/**
* Remove a cache pool.
*
* @param poolName
* Name of the cache pool to remove.
* @throws IOException
* if the cache pool did not exist, or could not be removed.
*/
public void removeCachePool(String poolName) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.REMOVE_CACHE_POOL);
  // Fail fast client-side on an invalid pool name.
  CachePoolInfo.validateName(poolName);
  dfs.removeCachePool(poolName);
}
/**
* List all cache pools.
*
* @return A remote iterator from which you can get CachePoolEntry objects.
* Requests will be made as needed.
* @throws IOException
* If there was an error listing cache pools.
*/
public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.LIST_CACHE_POOL);
  // Results are fetched incrementally from the NameNode as iterated.
  return dfs.listCachePools();
}
/**
* {@inheritDoc}
*/
@Override
public void modifyAclEntries(Path path, final List<AclEntry> aclSpec)
    throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.MODIFY_ACL_ENTRIES);
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path resolved) throws IOException {
      dfs.modifyAclEntries(getPathName(resolved), aclSpec);
      return null;
    }
    @Override
    public Void next(final FileSystem linkedFs, final Path resolved)
        throws IOException {
      linkedFs.modifyAclEntries(resolved, aclSpec);
      return null;
    }
  }.resolve(this, absolutePath);
}
/**
* {@inheritDoc}
*/
@Override
public void removeAclEntries(Path path, final List<AclEntry> aclSpec)
    throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.REMOVE_ACL_ENTRIES);
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path resolved) throws IOException {
      dfs.removeAclEntries(getPathName(resolved), aclSpec);
      return null;
    }
    @Override
    public Void next(final FileSystem linkedFs, final Path resolved)
        throws IOException {
      linkedFs.removeAclEntries(resolved, aclSpec);
      return null;
    }
  }.resolve(this, absolutePath);
}
/**
* {@inheritDoc}
*/
@Override
public void removeDefaultAcl(Path path) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.REMOVE_DEFAULT_ACL);
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path resolved) throws IOException {
      dfs.removeDefaultAcl(getPathName(resolved));
      return null;
    }
    @Override
    public Void next(final FileSystem linkedFs, final Path resolved)
        throws IOException {
      linkedFs.removeDefaultAcl(resolved);
      return null;
    }
  }.resolve(this, absolutePath);
}
/**
* {@inheritDoc}
*/
@Override
public void removeAcl(Path path) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.REMOVE_ACL);
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path resolved) throws IOException {
      dfs.removeAcl(getPathName(resolved));
      return null;
    }
    @Override
    public Void next(final FileSystem linkedFs, final Path resolved)
        throws IOException {
      linkedFs.removeAcl(resolved);
      return null;
    }
  }.resolve(this, absolutePath);
}
/**
* {@inheritDoc}
*/
@Override
public void setAcl(Path path, final List<AclEntry> aclSpec)
    throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.SET_ACL);
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path resolved) throws IOException {
      dfs.setAcl(getPathName(resolved), aclSpec);
      return null;
    }
    @Override
    public Void next(final FileSystem linkedFs, final Path resolved)
        throws IOException {
      linkedFs.setAcl(resolved, aclSpec);
      return null;
    }
  }.resolve(this, absolutePath);
}
/**
* {@inheritDoc}
*/
@Override
public AclStatus getAclStatus(Path path) throws IOException {
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  return new FileSystemLinkResolver<AclStatus>() {
    @Override
    public AclStatus doCall(final Path resolved) throws IOException {
      return dfs.getAclStatus(getPathName(resolved));
    }
    @Override
    public AclStatus next(final FileSystem linkedFs, final Path resolved)
        throws IOException {
      return linkedFs.getAclStatus(resolved);
    }
  }.resolve(this, absolutePath);
}
/* HDFS only */
// HDFS-only operation: makes {@code path} the root of an encryption zone
// keyed by {@code keyName}. Symlinks are followed only if they resolve to
// another DistributedFileSystem.
public void createEncryptionZone(final Path path, final String keyName)
    throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.CREATE_ENCRYPTION_ZONE);
  Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      dfs.createEncryptionZone(getPathName(p), keyName);
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p) throws IOException {
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem myDfs = (DistributedFileSystem) fs;
        myDfs.createEncryptionZone(p, keyName);
        return null;
      } else {
        throw new UnsupportedOperationException(
            "Cannot call createEncryptionZone"
                + " on a symlink to a non-DistributedFileSystem: " + path
                + " -> " + p);
      }
    }
  }.resolve(this, absF);
}
/* HDFS only */
// HDFS-only operation: returns the encryption zone containing {@code path},
// or null if the path is not inside one (per DFSClient#getEZForPath).
public EncryptionZone getEZForPath(final Path path)
    throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_ENCRYPTION_ZONE);
  Preconditions.checkNotNull(path);
  Path absF = fixRelativePart(path);
  return new FileSystemLinkResolver<EncryptionZone>() {
    @Override
    public EncryptionZone doCall(final Path p) throws IOException {
      return dfs.getEZForPath(getPathName(p));
    }
    @Override
    public EncryptionZone next(final FileSystem fs, final Path p)
        throws IOException {
      // Only another DistributedFileSystem can answer this query.
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem myDfs = (DistributedFileSystem) fs;
        return myDfs.getEZForPath(p);
      } else {
        throw new UnsupportedOperationException(
            "Cannot call getEZForPath"
                + " on a symlink to a non-DistributedFileSystem: " + path
                + " -> " + p);
      }
    }
  }.resolve(this, absF);
}
/* HDFS only */
// HDFS-only operation: lists all encryption zones visible to the caller.
public RemoteIterator<EncryptionZone> listEncryptionZones()
    throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.LIST_ENCRYPTION_ZONE);
  return dfs.listEncryptionZones();
}
/* HDFS only */
// HDFS-only operation: starts or cancels re-encryption of the given zone,
// depending on {@code action}.
public void reencryptEncryptionZone(final Path zone,
    final ReencryptAction action) throws IOException {
  final Path absF = fixRelativePart(zone);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      dfs.reencryptEncryptionZone(getPathName(p), action);
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p) throws IOException {
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem myDfs = (DistributedFileSystem) fs;
        myDfs.reencryptEncryptionZone(p, action);
        return null;
      }
      throw new UnsupportedOperationException(
          "Cannot call reencryptEncryptionZone"
              + " on a symlink to a non-DistributedFileSystem: " + zone
              + " -> " + p);
    }
  }.resolve(this, absF);
}
/* HDFS only */
// HDFS-only operation: iterates re-encryption status of all zones.
public RemoteIterator<ZoneReencryptionStatus> listReencryptionStatus()
    throws IOException {
  return dfs.listReencryptionStatus();
}
/* HDFS only */
// HDFS-only operation: returns the per-file encryption info, or null when
// the file is not encrypted (HdfsFileStatus#getFileEncryptionInfo).
public FileEncryptionInfo getFileEncryptionInfo(final Path path)
    throws IOException {
  Path absF = fixRelativePart(path);
  return new FileSystemLinkResolver<FileEncryptionInfo>() {
    @Override
    public FileEncryptionInfo doCall(final Path p) throws IOException {
      final HdfsFileStatus fi = dfs.getFileInfo(getPathName(p));
      // getFileInfo returns null for a missing path; surface that as FNFE.
      if (fi == null) {
        throw new FileNotFoundException("File does not exist: " + p);
      }
      return fi.getFileEncryptionInfo();
    }
    @Override
    public FileEncryptionInfo next(final FileSystem fs, final Path p)
        throws IOException {
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem myDfs = (DistributedFileSystem)fs;
        return myDfs.getFileEncryptionInfo(p);
      }
      throw new UnsupportedOperationException(
          "Cannot call getFileEncryptionInfo"
              + " on a symlink to a non-DistributedFileSystem: " + path
              + " -> " + p);
    }
  }.resolve(this, absF);
}
/* HDFS only */
// HDFS-only operation: creates the .Trash directory under the encryption
// zone rooted at {@code path}, with the given permission. Delegates the
// actual work to the private String overload.
public void provisionEZTrash(final Path path,
    final FsPermission trashPermission) throws IOException {
  Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(Path p) throws IOException {
      provisionEZTrash(getPathName(p), trashPermission);
      return null;
    }
    @Override
    public Void next(FileSystem fs, Path p) throws IOException {
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem myDfs = (DistributedFileSystem)fs;
        myDfs.provisionEZTrash(p, trashPermission);
        return null;
      }
      throw new UnsupportedOperationException("Cannot provisionEZTrash " +
          "through a symlink to a non-DistributedFileSystem: " + fs + " -> "
          + p);
    }
  }.resolve(this, absF);
}
/**
 * Creates and sets permissions on the trash root of an encryption zone.
 *
 * @param path must be exactly the root of an existing encryption zone.
 * @param trashPermission permission to apply to the trash directory.
 * @throws IllegalArgumentException if the path is not an EZ root.
 * @throws FileAlreadyExistsException if the trash path already exists.
 */
private void provisionEZTrash(String path, FsPermission trashPermission)
    throws IOException {
  // make sure the path is an EZ
  EncryptionZone ez = dfs.getEZForPath(path);
  if (ez == null) {
    throw new IllegalArgumentException(path + " is not an encryption zone.");
  }
  String ezPath = ez.getPath();
  // 'path' is already a String; the redundant toString() call is removed.
  if (!path.equals(ezPath)) {
    throw new IllegalArgumentException(path + " is not the root of an " +
        "encryption zone. Do you mean " + ez.getPath() + "?");
  }
  // check if the trash directory exists
  Path trashPath = new Path(ez.getPath(), FileSystem.TRASH_PREFIX);
  try {
    FileStatus trashFileStatus = getFileStatus(trashPath);
    String errMessage = "Will not provision new trash directory for " +
        "encryption zone " + ez.getPath() + ". Path already exists.";
    if (!trashFileStatus.isDirectory()) {
      errMessage += "\r\n" +
          "Warning: " + trashPath.toString() + " is not a directory";
    }
    if (!trashFileStatus.getPermission().equals(trashPermission)) {
      errMessage += "\r\n" +
          "Warning: the permission of " +
          trashPath.toString() + " is not " + trashPermission;
    }
    throw new FileAlreadyExistsException(errMessage);
  } catch (FileNotFoundException ignored) {
    // no trash path: expected case, proceed to create it
  }
  // Create the trash directory, then force the permission bits (mkdir is
  // subject to the umask, so a separate setPermission is required).
  mkdir(trashPath, trashPermission);
  setPermission(trashPath, trashPermission);
}
/**
* HDFS only.
*
* Provision snapshottable directory trash.
* @param path Path to a snapshottable directory.
* @param trashPermission Expected FsPermission of the trash root.
* @return Path of the provisioned trash root
*/
public Path provisionSnapshotTrash(final Path path,
    final FsPermission trashPermission) throws IOException {
  Path absF = fixRelativePart(path);
  return new FileSystemLinkResolver<Path>() {
    @Override
    public Path doCall(Path p) throws IOException {
      // Actual work happens in the private String overload below.
      return provisionSnapshotTrash(getPathName(p), trashPermission);
    }
    @Override
    public Path next(FileSystem fs, Path p) throws IOException {
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem myDfs = (DistributedFileSystem)fs;
        return myDfs.provisionSnapshotTrash(p, trashPermission);
      }
      throw new UnsupportedOperationException(
          "Cannot provisionSnapshotTrash through a symlink to" +
          " a non-DistributedFileSystem: " + fs + " -> " + p);
    }
  }.resolve(this, absF);
}
/**
 * Creates and sets permissions on the trash root of a snapshottable
 * directory, collecting all mismatch warnings before failing.
 *
 * @param pathStr path of a snapshottable directory.
 * @param trashPermission permission to apply to the trash directory.
 * @return the provisioned trash root path.
 * @throws IllegalArgumentException if the path is not snapshottable.
 * @throws FileAlreadyExistsException if a conflicting trash path exists.
 */
private Path provisionSnapshotTrash(
    String pathStr, FsPermission trashPermission) throws IOException {
  Path path = new Path(pathStr);
  // Given path must be a snapshottable directory
  FileStatus fileStatus = getFileStatus(path);
  if (!fileStatus.isSnapshotEnabled()) {
    throw new IllegalArgumentException(
        path + " is not a snapshottable directory.");
  }
  // Check if trash root already exists
  Path trashPath = new Path(path, FileSystem.TRASH_PREFIX);
  try {
    FileStatus trashFileStatus = getFileStatus(trashPath);
    // Unlike the EZ variant, an existing trash path is only fatal when it
    // is not a directory or its permission differs from the requested one.
    boolean throwException = false;
    String errMessage = "Can't provision trash for snapshottable directory " +
        pathStr + " because trash path " + trashPath.toString() +
        " already exists.";
    if (!trashFileStatus.isDirectory()) {
      throwException = true;
      errMessage += "\r\n" +
          "WARNING: " + trashPath.toString() + " is not a directory.";
    }
    if (!trashFileStatus.getPermission().equals(trashPermission)) {
      throwException = true;
      errMessage += "\r\n" +
          "WARNING: Permission of " + trashPath.toString() +
          " differs from provided permission " + trashPermission;
    }
    if (throwException) {
      throw new FileAlreadyExistsException(errMessage);
    }
  } catch (FileNotFoundException ignored) {
    // Trash path doesn't exist. Continue
  }
  // Create trash root and set the permission (mkdir alone is subject to
  // the umask, so the explicit setPermission is required).
  mkdir(trashPath, trashPermission);
  setPermission(trashPath, trashPermission);
  // Print a warning if snapshot trash root feature is not enabled
  if (!isSnapshotTrashRootEnabled()) {
    // MESSAGE FIX: the original text read "This new trash but won't be",
    // which was grammatically broken.
    DFSClient.LOG.warn("New trash is provisioned, but the snapshot trash root"
        + " feature is disabled. This new trash won't be automatically"
        + " utilized unless the feature is enabled on the NameNode.");
  }
  return trashPath;
}
@Override
public void setXAttr(Path path, final String name, final byte[] value,
    final EnumSet<XAttrSetFlag> flag) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.SET_XATTR);
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path resolved) throws IOException {
      dfs.setXAttr(getPathName(resolved), name, value, flag);
      return null;
    }
    @Override
    public Void next(final FileSystem linkedFs, final Path resolved)
        throws IOException {
      linkedFs.setXAttr(resolved, name, value, flag);
      return null;
    }
  }.resolve(this, absolutePath);
}
@Override
public byte[] getXAttr(Path path, final String name) throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_XATTR);
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  return new FileSystemLinkResolver<byte[]>() {
    @Override
    public byte[] doCall(final Path resolved) throws IOException {
      return dfs.getXAttr(getPathName(resolved), name);
    }
    @Override
    public byte[] next(final FileSystem linkedFs, final Path resolved)
        throws IOException {
      return linkedFs.getXAttr(resolved, name);
    }
  }.resolve(this, absolutePath);
}
@Override
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  return new FileSystemLinkResolver<Map<String, byte[]>>() {
    @Override
    public Map<String, byte[]> doCall(final Path resolved)
        throws IOException {
      return dfs.getXAttrs(getPathName(resolved));
    }
    @Override
    public Map<String, byte[]> next(final FileSystem linkedFs,
        final Path resolved) throws IOException {
      return linkedFs.getXAttrs(resolved);
    }
  }.resolve(this, absolutePath);
}
@Override
public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
    throws IOException {
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  return new FileSystemLinkResolver<Map<String, byte[]>>() {
    @Override
    public Map<String, byte[]> doCall(final Path resolved)
        throws IOException {
      return dfs.getXAttrs(getPathName(resolved), names);
    }
    @Override
    public Map<String, byte[]> next(final FileSystem linkedFs,
        final Path resolved) throws IOException {
      return linkedFs.getXAttrs(resolved, names);
    }
  }.resolve(this, absolutePath);
}
@Override
public List<String> listXAttrs(Path path)
    throws IOException {
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  return new FileSystemLinkResolver<List<String>>() {
    @Override
    public List<String> doCall(final Path resolved) throws IOException {
      return dfs.listXAttrs(getPathName(resolved));
    }
    @Override
    public List<String> next(final FileSystem linkedFs, final Path resolved)
        throws IOException {
      return linkedFs.listXAttrs(resolved);
    }
  }.resolve(this, absolutePath);
}
@Override
public void removeXAttr(Path path, final String name) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.REMOVE_XATTR);
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path resolved) throws IOException {
      dfs.removeXAttr(getPathName(resolved), name);
      return null;
    }
    @Override
    public Void next(final FileSystem linkedFs, final Path resolved)
        throws IOException {
      linkedFs.removeXAttr(resolved, name);
      return null;
    }
  }.resolve(this, absolutePath);
}
@Override
public void access(Path path, final FsAction mode) throws IOException {
  // Resolve symlinks, retrying on whichever filesystem the link points to.
  final Path absolutePath = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path resolved) throws IOException {
      dfs.checkAccess(getPathName(resolved), mode);
      return null;
    }
    @Override
    public Void next(final FileSystem linkedFs, final Path resolved)
        throws IOException {
      linkedFs.access(resolved, mode);
      return null;
    }
  }.resolve(this, absolutePath);
}
@Override
public URI getKeyProviderUri() throws IOException {
  // Delegates to the client, which resolves the KMS provider URI.
  return dfs.getKeyProviderUri();
}
@Override
public KeyProvider getKeyProvider() throws IOException {
  // Delegates to the client; may be null if no provider is configured.
  // NOTE(review): nullability inferred from getAdditionalTokenIssuers'
  // instanceof guard below — confirm against DFSClient.
  return dfs.getKeyProvider();
}
@Override
public DelegationTokenIssuer[] getAdditionalTokenIssuers()
    throws IOException {
  // Expose the key provider as an extra token issuer when it supports
  // delegation tokens; otherwise there are no additional issuers.
  KeyProvider keyProvider = getKeyProvider();
  if (!(keyProvider instanceof DelegationTokenIssuer)) {
    return null;
  }
  return new DelegationTokenIssuer[]{(DelegationTokenIssuer) keyProvider};
}
// Returns a stream of namespace change events, starting from the current
// transaction.
public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
  return dfs.getInotifyEventStream();
}
// Returns a stream of namespace change events occurring after the given
// transaction id.
public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
    throws IOException {
  return dfs.getInotifyEventStream(lastReadTxid);
}
/**
* Set the source path to the specified erasure coding policy.
*
* @param path The directory to set the policy
* @param ecPolicyName The erasure coding policy name.
* @throws IOException
*/
public void setErasureCodingPolicy(final Path path,
    final String ecPolicyName) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.SET_EC_POLICY);
  Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      dfs.setErasureCodingPolicy(getPathName(p), ecPolicyName);
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p) throws IOException {
      // EC policies only make sense on another DistributedFileSystem.
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem myDfs = (DistributedFileSystem) fs;
        myDfs.setErasureCodingPolicy(p, ecPolicyName);
        return null;
      }
      throw new UnsupportedOperationException(
          "Cannot setErasureCodingPolicy through a symlink to a "
              + "non-DistributedFileSystem: " + path + " -> " + p);
    }
  }.resolve(this, absF);
}
/**
* Set the source path to satisfy storage policy.
* @param path The source path referring to either a directory or a file.
* @throws IOException
*/
public void satisfyStoragePolicy(final Path path) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.SATISFY_STORAGE_POLICY);
  Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(Path p) throws IOException {
      dfs.satisfyStoragePolicy(getPathName(p));
      return null;
    }
    @Override
    public Void next(FileSystem fs, Path p) throws IOException {
      // DFS only
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem myDfs = (DistributedFileSystem) fs;
        myDfs.satisfyStoragePolicy(p);
        return null;
      }
      throw new UnsupportedOperationException(
          "Cannot satisfyStoragePolicy through a symlink to a "
              + "non-DistributedFileSystem: " + path + " -> " + p);
    }
  }.resolve(this, absF);
}
/**
* Get erasure coding policy information for the specified path.
*
* @param path The path of the file or directory
* @return Returns the policy information if file or directory on the path
* is erasure coded, null otherwise. Null will be returned if directory or
* file has REPLICATION policy.
* @throws IOException
*/
public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
    throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_EC_POLICY);
  Path absF = fixRelativePart(path);
  return new FileSystemLinkResolver<ErasureCodingPolicy>() {
    @Override
    public ErasureCodingPolicy doCall(final Path p) throws IOException {
      return dfs.getErasureCodingPolicy(getPathName(p));
    }
    @Override
    public ErasureCodingPolicy next(final FileSystem fs, final Path p)
        throws IOException {
      // EC policies only make sense on another DistributedFileSystem.
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem myDfs = (DistributedFileSystem) fs;
        return myDfs.getErasureCodingPolicy(p);
      }
      throw new UnsupportedOperationException(
          "Cannot getErasureCodingPolicy through a symlink to a "
              + "non-DistributedFileSystem: " + path + " -> " + p);
    }
  }.resolve(this, absF);
}
/**
* Retrieve all the erasure coding policies supported by this file system,
* including enabled, disabled and removed policies, but excluding
* REPLICATION policy.
*
* @return all erasure coding policies supported by this file system.
* @throws IOException
*/
public Collection<ErasureCodingPolicyInfo> getAllErasureCodingPolicies()
    throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_EC_POLICIES);
  // Wrap the client's array in a fixed-size List view.
  return Arrays.asList(dfs.getErasureCodingPolicies());
}
/**
* Retrieve all the erasure coding codecs and coders supported by this file
* system.
*
* @return all erasure coding codecs and coders supported by this file system.
* @throws IOException
*/
public Map<String, String> getAllErasureCodingCodecs()
    throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_EC_CODECS);
  // Pure delegation to the client.
  return dfs.getErasureCodingCodecs();
}
/**
* Add Erasure coding policies to HDFS. For each policy input, schema and
* cellSize are musts, name and id are ignored. They will be automatically
* created and assigned by Namenode once the policy is successfully added,
* and will be returned in the response; policy states will be set to
* DISABLED automatically.
*
* @param policies The user defined ec policy list to add.
* @return Return the response list of adding operations.
* @throws IOException
*/
public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
    ErasureCodingPolicy[] policies) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.ADD_EC_POLICY);
  // Name/id fields of the inputs are assigned by the NameNode.
  return dfs.addErasureCodingPolicies(policies);
}
/**
* Remove erasure coding policy.
*
* @param ecPolicyName The name of the policy to be removed.
* @throws IOException
*/
public void removeErasureCodingPolicy(String ecPolicyName)
    throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.REMOVE_EC_POLICY);
  // Pure delegation to the client.
  dfs.removeErasureCodingPolicy(ecPolicyName);
}
/**
* Enable erasure coding policy.
*
* @param ecPolicyName The name of the policy to be enabled.
* @throws IOException
*/
public void enableErasureCodingPolicy(String ecPolicyName)
    throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.ENABLE_EC_POLICY);
  // Pure delegation to the client.
  dfs.enableErasureCodingPolicy(ecPolicyName);
}
/**
* Disable erasure coding policy.
*
* @param ecPolicyName The name of the policy to be disabled.
* @throws IOException
*/
public void disableErasureCodingPolicy(String ecPolicyName)
    throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.DISABLE_EC_POLICY);
  // Pure delegation to the client.
  dfs.disableErasureCodingPolicy(ecPolicyName);
}
/**
* Unset the erasure coding policy from the source path.
*
* @param path The directory to unset the policy
* @throws IOException
*/
public void unsetErasureCodingPolicy(final Path path) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.UNSET_EC_POLICY);
  Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      dfs.unsetErasureCodingPolicy(getPathName(p));
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p) throws IOException {
      // EC policies only make sense on another DistributedFileSystem.
      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem myDfs = (DistributedFileSystem) fs;
        myDfs.unsetErasureCodingPolicy(p);
        return null;
      }
      throw new UnsupportedOperationException(
          "Cannot unsetErasureCodingPolicy through a symlink to a "
              + "non-DistributedFileSystem: " + path + " -> " + p);
    }
  }.resolve(this, absF);
}
/**
* Verifies if the given policies are supported in the given cluster setup.
* If not policy is specified checks for all enabled policies.
* @param policyNames name of policies.
* @return the result if the given policies are supported in the cluster setup
* @throws IOException
*/
public ECTopologyVerifierResult getECTopologyResultForPolicies(
    final String... policyNames) throws IOException {
  // Pure delegation; with no names given the client checks all enabled
  // policies (see method javadoc above).
  return dfs.getECTopologyResultForPolicies(policyNames);
}
/**
* Get the root directory of Trash for a path in HDFS.
* 1. File in encryption zone returns /ez1/.Trash/username
* 2. File in snapshottable directory returns /snapdir1/.Trash/username
* if dfs.namenode.snapshot.trashroot.enabled is set to true.
* 3. In other cases, or encountered exception when checking the encryption
* zone or when checking snapshot root of the path, returns
* /users/username/.Trash
* Caller appends either Current or checkpoint timestamp for trash destination
* @param path the trash root of the path to be determined.
* @return trash root
*/
@Override
public Path getTrashRoot(Path path) {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_TRASH_ROOT);
  if (path == null) {
    return super.getTrashRoot(null);
  }
  // Snapshottable directory trash root, not null if path is inside a
  // snapshottable directory and isSnapshotTrashRootEnabled is true from NN.
  String ssTrashRoot = null;
  try {
    if (dfs.isSnapshotTrashRootEnabled()) {
      String ssRoot = dfs.getSnapshotRoot(path);
      if (ssRoot != null) {
        ssTrashRoot = DFSUtilClient.getSnapshotTrashRoot(ssRoot, dfs.ugi);
      }
    }
  } catch (IOException ioe) {
    // Best effort: fall through with ssTrashRoot == null.
    DFSClient.LOG.warn("Exception while checking whether the path is in a "
        + "snapshottable directory", ioe);
  }
  try {
    if (!dfs.isHDFSEncryptionEnabled()) {
      if (ssTrashRoot == null) {
        // the path is not in a snapshottable directory and EZ is not enabled
        return super.getTrashRoot(path);
      } else {
        return this.makeQualified(new Path(ssTrashRoot));
      }
    }
  } catch (IOException ioe) {
    // Best effort: fall through to the EZ check below.
    DFSClient.LOG.warn("Exception while checking whether encryption zone is "
        + "supported", ioe);
  }
  // HDFS encryption is enabled on the cluster at this point, does not
  // necessary mean the given path is in an EZ hence the check.
  String parentSrc = path.isRoot() ?
      path.toUri().getPath() : path.getParent().toUri().getPath();
  String ezTrashRoot = null;
  try {
    EncryptionZone ez = dfs.getEZForPath(parentSrc);
    if ((ez != null)) {
      ezTrashRoot = DFSUtilClient.getEZTrashRoot(ez, dfs.ugi);
    }
  } catch (IOException e) {
    // Best effort: fall through with ezTrashRoot == null.
    DFSClient.LOG.warn("Exception in checking the encryption zone for the " +
        "path " + parentSrc + ". " + e.getMessage());
  }
  // Precedence: when both candidates exist, the longer (deeper) trash
  // root wins; otherwise whichever is non-null; else the default root.
  if (ssTrashRoot == null) {
    if (ezTrashRoot == null) {
      // The path is neither in a snapshottable directory nor in an EZ
      return super.getTrashRoot(path);
    } else {
      return this.makeQualified(new Path(ezTrashRoot));
    }
  } else {
    if (ezTrashRoot == null) {
      return this.makeQualified(new Path(ssTrashRoot));
    } else {
      // The path is in EZ and in a snapshottable directory
      return this.makeQualified(new Path(
          ssTrashRoot.length() > ezTrashRoot.length() ?
              ssTrashRoot : ezTrashRoot));
    }
  }
}
/**
 * Get all the trash roots of HDFS for current user or for all the users.
 * 1. File deleted from encryption zones
 *    e.g., ez1 rooted at /ez1 has its trash root at /ez1/.Trash/$USER
 * 2. File deleted from snapshottable directories
 *    if dfs.namenode.snapshot.trashroot.enabled is set to true.
 *    e.g., snapshottable directory /snapdir1 has its trash root
 *    at /snapdir1/.Trash/$USER
 * 3. File deleted from other directories
 *    /user/username/.Trash
 * Failures while enumerating EZ or snapshot trash roots are logged and
 * ignored; the roots gathered so far are still returned (best effort).
 * @param allUsers return trashRoots of all users if true, used by emptier
 * @return trash roots of HDFS
 */
@Override
public Collection<FileStatus> getTrashRoots(boolean allUsers) {
  // Record the call in both the generic FileSystem statistics and the
  // per-operation DFS op counter.
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_TRASH_ROOTS);
  // A Set deduplicates roots that are reported by more than one source.
  Set<FileStatus> ret = new HashSet<>();
  // Get normal trash roots
  ret.addAll(super.getTrashRoots(allUsers));
  try {
    // Get EZ Trash roots
    final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
    while (it.hasNext()) {
      EncryptionZone ez = it.next();
      Path ezTrashRoot = new Path(ez.getPath(),
          FileSystem.TRASH_PREFIX);
      if (!exists(ezTrashRoot)) {
        // EZ has no .Trash directory yet; nothing to report for it.
        continue;
      }
      if (allUsers) {
        // One per-user subdirectory under the EZ trash root.
        for (FileStatus candidate : listStatus(ezTrashRoot)) {
          if (exists(candidate.getPath())) {
            ret.add(candidate);
          }
        }
      } else {
        // Only the current user's trash root inside this EZ.
        Path userTrash = new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi));
        try {
          ret.add(getFileStatus(userTrash));
        } catch (FileNotFoundException ignored) {
          // This user has no trash in this EZ; skip silently.
        }
      }
    }
  } catch (IOException e){
    // Best effort: keep whatever roots were collected before the failure.
    DFSClient.LOG.warn("Cannot get all encrypted trash roots", e);
  }
  try {
    // Get snapshottable directory trash roots
    if (dfs.isSnapshotTrashRootEnabled()) {
      SnapshottableDirectoryStatus[] lst = dfs.getSnapshottableDirListing();
      if (lst != null) {
        for (SnapshottableDirectoryStatus dirStatus : lst) {
          String ssDir = dirStatus.getFullPath().toString();
          Path ssTrashRoot = new Path(ssDir, FileSystem.TRASH_PREFIX);
          if (!exists(ssTrashRoot)) {
            // Snapshottable dir has no .Trash directory yet; skip it.
            continue;
          }
          if (allUsers) {
            // One per-user subdirectory under the snapshot trash root.
            for (FileStatus candidate : listStatus(ssTrashRoot)) {
              if (exists(candidate.getPath())) {
                ret.add(candidate);
              }
            }
          } else {
            // Only the current user's trash root in this directory.
            Path userTrash = new Path(DFSUtilClient.getSnapshotTrashRoot(
                ssDir, dfs.ugi));
            try {
              ret.add(getFileStatus(userTrash));
            } catch (FileNotFoundException ignored) {
              // This user has no trash here; skip silently.
            }
          }
        }
      }
    }
  } catch (IOException e) {
    // Best effort: keep whatever roots were collected before the failure.
    DFSClient.LOG.warn("Cannot get snapshot trash roots", e);
  }
  return ret;
}
/**
 * Resolves a relative path against the working directory by delegating
 * to the superclass. NOTE(review): this override adds no behavior of its
 * own; presumably it exists to make the method reachable from this
 * class/package -- confirm before removing.
 */
@Override
protected Path fixRelativePart(Path p) {
  final Path resolved = super.fixRelativePart(p);
  return resolved;
}
/** Package-private accessor for the {@code Statistics} object backing this
 *  instance; used by collaborators in the same package. */
Statistics getFsStatistics() {
  return this.statistics;
}
/** Package-private accessor for the per-operation DFS op counters backing
 *  this instance. */
DFSOpsCountStatistics getDFSOpsCountStatistics() {
  return this.storageStatistics;
}
/**
* HdfsDataOutputStreamBuilder provides the HDFS-specific capabilities to
* write file on HDFS.
*/
public static final
|
SnapshotDiffReportListingIterator
|
java
|
apache__camel
|
components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/fixed/marshall/simple/BindySimpleFixedLengthMarshallTest.java
|
{
"start": 3420,
"end": 4116
}
|
class ____ extends RouteBuilder {
@Override
public void configure() {
// default should errors go to mock:error
errorHandler(deadLetterChannel(URI_MOCK_ERROR).redeliveryDelay(0));
onException(Exception.class).maximumRedeliveries(0).handled(true);
BindyDataFormat bindy = new BindyDataFormat();
bindy.setLocale("en");
bindy.setClassType(Order.class);
bindy.type(BindyType.Fixed);
from(URI_DIRECT_START)
.marshal(bindy)
.to(URI_MOCK_RESULT);
}
}
@FixedLengthRecord(length = 65, paddingChar = ' ')
public static
|
ContextConfig
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/util/ExceptionUtils.java
|
{
"start": 8101,
"end": 8571
}
|
class ____ on the classpath does not which is why this exception appears.",
e);
}
field.setAccessible(true);
try {
field.set(throwable, newDetailMessage);
} catch (IllegalAccessException e) {
throw new IllegalStateException(
"The JDK Throwable contains a private detailMessage member that should be accessible through reflection. This is not the case for the Throwable
|
provided
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/component/file/FileProducerFileExistIgnoreTest.java
|
{
"start": 1089,
"end": 2058
}
|
class ____ extends ContextTestSupport {
private static final String TEST_FILE_NAME = "hello" + UUID.randomUUID() + ".txt";
@Test
public void testIgnore() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
mock.expectedFileExists(testFile(TEST_FILE_NAME), "Hello World");
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, TEST_FILE_NAME);
template.sendBodyAndHeader(fileUri("?fileExist=Ignore"), "Bye World", Exchange.FILE_NAME, TEST_FILE_NAME);
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(fileUri("?noop=true&initialDelay=0&delay=10")).convertBodyTo(String.class).to("mock:result");
}
};
}
}
|
FileProducerFileExistIgnoreTest
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/recording/RecorderContext.java
|
{
"start": 256,
"end": 342
}
|
class ____ contains methods that can be needed for dealing with recorders.
*/
public
|
that
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/InterceptedInferenceKnnVectorQueryBuilder.java
|
{
"start": 5911,
"end": 13515
}
|
class ____ support it, which would
// be a server-side error.
throw new IllegalStateException(
"No [" + TextEmbeddingQueryVectorBuilder.NAME + "] query vector builder or query vector specified"
);
}
// Check if we are querying any non-inference fields
Collection<IndexMetadata> indexMetadataCollection = resolvedIndices.getConcreteLocalIndicesMetadata().values();
for (IndexMetadata indexMetadata : indexMetadataCollection) {
InferenceFieldMetadata inferenceFieldMetadata = indexMetadata.getInferenceFields().get(getField());
if (inferenceFieldMetadata == null) {
QueryVectorBuilder queryVectorBuilder = originalQuery.queryVectorBuilder();
if (queryVectorBuilder instanceof TextEmbeddingQueryVectorBuilder textEmbeddingQueryVectorBuilder
&& textEmbeddingQueryVectorBuilder.getModelId() == null) {
throw new IllegalArgumentException("[model_id] must not be null.");
}
}
}
}
@Override
protected QueryBuilder doRewriteBwC(QueryRewriteContext queryRewriteContext) throws IOException {
QueryBuilder rewritten = this;
if (queryRewriteContext.getMinTransportVersion().supports(NEW_SEMANTIC_QUERY_INTERCEPTORS) == false) {
rewritten = BWC_INTERCEPTOR.interceptAndRewrite(queryRewriteContext, originalQuery);
}
return rewritten;
}
@Override
protected InterceptedInferenceQueryBuilder<KnnVectorQueryBuilder> copy(
Map<FullyQualifiedInferenceId, InferenceResults> inferenceResultsMap,
SetOnce<Map<FullyQualifiedInferenceId, InferenceResults>> inferenceResultsMapSupplier,
boolean ccsRequest
) {
return new InterceptedInferenceKnnVectorQueryBuilder(this, inferenceResultsMap, inferenceResultsMapSupplier, ccsRequest);
}
@Override
protected QueryBuilder queryFields(
Map<String, Float> inferenceFields,
Map<String, Float> nonInferenceFields,
QueryRewriteContext indexMetadataContext
) {
QueryBuilder rewritten;
MappedFieldType fieldType = indexMetadataContext.getFieldType(getField());
if (fieldType == null) {
rewritten = new MatchNoneQueryBuilder();
} else if (fieldType instanceof SemanticTextFieldMapper.SemanticTextFieldType semanticTextFieldType) {
rewritten = querySemanticTextField(indexMetadataContext.getLocalClusterAlias(), semanticTextFieldType);
} else {
rewritten = queryNonSemanticTextField();
}
return rewritten;
}
@Override
protected boolean resolveWildcards() {
return false;
}
@Override
protected boolean useDefaultFields() {
return false;
}
@Override
public String getWriteableName() {
return NAME;
}
private String getField() {
return originalQuery.getFieldName();
}
private String getQueryVectorBuilderModelId() {
String modelId = null;
QueryVectorBuilder queryVectorBuilder = originalQuery.queryVectorBuilder();
if (queryVectorBuilder instanceof TextEmbeddingQueryVectorBuilder textEmbeddingQueryVectorBuilder) {
modelId = textEmbeddingQueryVectorBuilder.getModelId();
}
return modelId;
}
private QueryBuilder querySemanticTextField(String clusterAlias, SemanticTextFieldMapper.SemanticTextFieldType semanticTextFieldType) {
MinimalServiceSettings modelSettings = semanticTextFieldType.getModelSettings();
if (modelSettings == null) {
// No inference results have been indexed yet
return new MatchNoneQueryBuilder();
} else if (modelSettings.taskType() != TaskType.TEXT_EMBEDDING) {
throw new IllegalArgumentException("Field [" + getField() + "] does not use a [" + TaskType.TEXT_EMBEDDING + "] model");
}
VectorData queryVector = originalQuery.queryVector();
if (queryVector == null) {
FullyQualifiedInferenceId fullyQualifiedInferenceId = getInferenceIdOverride();
if (fullyQualifiedInferenceId == null) {
fullyQualifiedInferenceId = new FullyQualifiedInferenceId(clusterAlias, semanticTextFieldType.getSearchInferenceId());
}
MlDenseEmbeddingResults textEmbeddingResults = getTextEmbeddingResults(fullyQualifiedInferenceId);
queryVector = new VectorData(textEmbeddingResults.getInferenceAsFloat());
}
KnnVectorQueryBuilder innerKnnQuery = new KnnVectorQueryBuilder(
SemanticTextField.getEmbeddingsFieldName(getField()),
queryVector,
originalQuery.k(),
originalQuery.numCands(),
originalQuery.visitPercentage(),
originalQuery.rescoreVectorBuilder(),
originalQuery.getVectorSimilarity()
);
innerKnnQuery.addFilterQueries(originalQuery.filterQueries());
return QueryBuilders.nestedQuery(SemanticTextField.getChunksFieldName(getField()), innerKnnQuery, ScoreMode.Max)
.boost(originalQuery.boost())
.queryName(originalQuery.queryName());
}
private QueryBuilder queryNonSemanticTextField() {
VectorData queryVector = originalQuery.queryVector();
if (queryVector == null) {
FullyQualifiedInferenceId fullyQualifiedInferenceId = getInferenceIdOverride();
if (fullyQualifiedInferenceId == null) {
// This should never happen because we validate that either query vector or a valid query vector builder is specified in:
// - The KnnVectorQueryBuilder constructor
// - coordinatorNodeValidate
throw new IllegalStateException("No query vector or query vector builder model ID specified");
}
MlDenseEmbeddingResults textEmbeddingResults = getTextEmbeddingResults(fullyQualifiedInferenceId);
queryVector = new VectorData(textEmbeddingResults.getInferenceAsFloat());
}
KnnVectorQueryBuilder knnQuery = new KnnVectorQueryBuilder(
getField(),
queryVector,
originalQuery.k(),
originalQuery.numCands(),
originalQuery.visitPercentage(),
originalQuery.rescoreVectorBuilder(),
originalQuery.getVectorSimilarity()
).boost(originalQuery.boost()).queryName(originalQuery.queryName());
knnQuery.addFilterQueries(originalQuery.filterQueries());
return knnQuery;
}
private MlDenseEmbeddingResults getTextEmbeddingResults(FullyQualifiedInferenceId fullyQualifiedInferenceId) {
InferenceResults inferenceResults = inferenceResultsMap.get(fullyQualifiedInferenceId);
if (inferenceResults == null) {
throw new IllegalStateException("Could not find inference results from inference endpoint [" + fullyQualifiedInferenceId + "]");
} else if (inferenceResults instanceof MlDenseEmbeddingResults == false) {
throw new IllegalArgumentException(
"Expected query inference results to be of type ["
+ MlDenseEmbeddingResults.NAME
+ "], got ["
+ inferenceResults.getWriteableName()
+ "]. Are you specifying a compatible inference endpoint? Has the inference endpoint configuration changed?"
);
}
return (MlDenseEmbeddingResults) inferenceResults;
}
}
|
to
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/timestamp_with_timezone/Mapper.java
|
{
"start": 810,
"end": 1147
}
|
interface ____ {
@Select("select id, odt, odt ot from records where id = #{id}")
Record selectById(Integer id);
@Insert("insert into records (id, odt) values (#{id}, #{odt})")
int insertOffsetDateTime(Record record);
@Insert("insert into records (id, odt) values (#{id}, #{ot})")
int insertOffsetTime(Record record);
}
|
Mapper
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java
|
{
"start": 1274,
"end": 6661
}
|
class ____ extends ActionResponse implements ToXContentFragment {
private final TimeValue took;
private final BulkByScrollTask.Status status;
private final List<Failure> bulkFailures;
private final List<ScrollableHitSource.SearchFailure> searchFailures;
private boolean timedOut;
static final String TOOK_FIELD = "took";
static final String TIMED_OUT_FIELD = "timed_out";
static final String FAILURES_FIELD = "failures";
public BulkByScrollResponse(StreamInput in) throws IOException {
took = in.readTimeValue();
status = new BulkByScrollTask.Status(in);
bulkFailures = in.readCollectionAsList(Failure::new);
searchFailures = in.readCollectionAsList(ScrollableHitSource.SearchFailure::new);
timedOut = in.readBoolean();
}
public BulkByScrollResponse(
TimeValue took,
BulkByScrollTask.Status status,
List<Failure> bulkFailures,
List<ScrollableHitSource.SearchFailure> searchFailures,
boolean timedOut
) {
this.took = took;
this.status = requireNonNull(status, "Null status not supported");
this.bulkFailures = bulkFailures;
this.searchFailures = searchFailures;
this.timedOut = timedOut;
}
public BulkByScrollResponse(Iterable<BulkByScrollResponse> toMerge, @Nullable String reasonCancelled) {
long mergedTook = 0;
List<BulkByScrollTask.StatusOrException> statuses = new ArrayList<>();
bulkFailures = new ArrayList<>();
searchFailures = new ArrayList<>();
for (BulkByScrollResponse response : toMerge) {
mergedTook = max(mergedTook, response.getTook().nanos());
statuses.add(new BulkByScrollTask.StatusOrException(response.status));
bulkFailures.addAll(response.getBulkFailures());
searchFailures.addAll(response.getSearchFailures());
timedOut |= response.isTimedOut();
}
took = timeValueNanos(mergedTook);
status = new BulkByScrollTask.Status(statuses, reasonCancelled);
}
public TimeValue getTook() {
return took;
}
public BulkByScrollTask.Status getStatus() {
return status;
}
public long getCreated() {
return status.getCreated();
}
public long getTotal() {
return status.getTotal();
}
public long getDeleted() {
return status.getDeleted();
}
public long getUpdated() {
return status.getUpdated();
}
public int getBatches() {
return status.getBatches();
}
public long getVersionConflicts() {
return status.getVersionConflicts();
}
public long getNoops() {
return status.getNoops();
}
/**
* The reason that the request was canceled or null if it hasn't been.
*/
public String getReasonCancelled() {
return status.getReasonCancelled();
}
/**
* The number of times that the request had retry bulk actions.
*/
public long getBulkRetries() {
return status.getBulkRetries();
}
/**
* The number of times that the request had retry search actions.
*/
public long getSearchRetries() {
return status.getSearchRetries();
}
/**
* All of the bulk failures. Version conflicts are only included if the request sets abortOnVersionConflict to true (the default).
*/
public List<Failure> getBulkFailures() {
return bulkFailures;
}
/**
* All search failures.
*/
public List<ScrollableHitSource.SearchFailure> getSearchFailures() {
return searchFailures;
}
/**
* Did any of the sub-requests that were part of this request timeout?
*/
public boolean isTimedOut() {
return timedOut;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeTimeValue(took);
status.writeTo(out);
out.writeCollection(bulkFailures);
out.writeCollection(searchFailures);
out.writeBoolean(timedOut);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(TOOK_FIELD, took.millis());
builder.field(TIMED_OUT_FIELD, timedOut);
status.innerXContent(builder, params);
builder.startArray(FAILURES_FIELD);
for (Failure failure : bulkFailures) {
builder.startObject();
failure.toXContent(builder, params);
builder.endObject();
}
for (ScrollableHitSource.SearchFailure failure : searchFailures) {
failure.toXContent(builder, params);
}
builder.endArray();
return builder;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append(getClass().getSimpleName()).append("[");
builder.append("took=").append(took).append(',');
builder.append("timed_out=").append(timedOut).append(',');
status.innerToString(builder);
builder.append(",bulk_failures=").append(getBulkFailures().subList(0, min(3, getBulkFailures().size())));
builder.append(",search_failures=").append(getSearchFailures().subList(0, min(3, getSearchFailures().size())));
return builder.append(']').toString();
}
}
|
BulkByScrollResponse
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/test/java/io/vertx/tests/deployment/TestVerticle.java
|
{
"start": 646,
"end": 1126
}
|
class ____ extends AbstractVerticle {
public static AtomicInteger instanceCount = new AtomicInteger();
public static JsonObject conf;
public TestVerticle() {
}
@Override
public void start() throws Exception {
conf = context.config();
vertx.eventBus().send("testcounts",
new JsonObject().put("deploymentID", context.deploymentID()).put("count", instanceCount.incrementAndGet()));
}
@Override
public void stop() throws Exception {
}
}
|
TestVerticle
|
java
|
micronaut-projects__micronaut-core
|
http/src/main/java/io/micronaut/http/util/OutgoingHttpRequestProcessor.java
|
{
"start": 839,
"end": 1131
}
|
interface ____ {
/**
*
* @param matcher Request Matcher
* @param request The request
* @return true if the request should be processed
*/
boolean shouldProcessRequest(OutgoingRequestProcessorMatcher matcher, HttpRequest<?> request);
}
|
OutgoingHttpRequestProcessor
|
java
|
apache__flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/hybrid/HybridSource.java
|
{
"start": 7927,
"end": 8450
}
|
class ____<
T, SourceT extends Source<T, ?, ?>, FromEnumT extends SplitEnumerator>
implements SourceFactory<T, SourceT, FromEnumT> {
private final SourceT source;
private PassthroughSourceFactory(SourceT source) {
this.source = source;
}
@Override
public SourceT create(SourceSwitchContext<FromEnumT> context) {
return source;
}
}
/** Entry for list of underlying sources. */
static
|
PassthroughSourceFactory
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/BulkDeleteRetryHandler.java
|
{
"start": 1762,
"end": 5183
}
|
class ____ extends AbstractStoreOperation {
private static final Logger LOG = LoggerFactory.getLogger(
BulkDeleteRetryHandler.class);
private static final Logger THROTTLE_LOG = LoggerFactory.getLogger(
THROTTLE_LOG_NAME);
/**
* This is an error string we see in exceptions when the XML parser
* failed: {@value}.
*/
public static final String XML_PARSE_BROKEN = "Failed to parse XML document";
private final S3AStatisticsContext instrumentation;
private final S3AStorageStatistics storageStatistics;
/**
* Constructor.
* @param storeContext context
*/
public BulkDeleteRetryHandler(final StoreContext storeContext) {
super(storeContext);
instrumentation = storeContext.getInstrumentation();
storageStatistics = storeContext.getStorageStatistics();
}
/**
* Increment a statistic by 1.
* This increments both the instrumentation and storage statistics.
* @param statistic The operation to increment
*/
protected void incrementStatistic(Statistic statistic) {
incrementStatistic(statistic, 1);
}
/**
* Increment a statistic by a specific value.
* This increments both the instrumentation and storage statistics.
* @param statistic The operation to increment
* @param count the count to increment
*/
protected void incrementStatistic(Statistic statistic, long count) {
instrumentation.incrementCounter(statistic, count);
}
/**
* Handler for failure of bulk delete requests.
* @param deleteRequest request which was retried.
* @param ex exception
*/
public void bulkDeleteRetried(
DeleteObjectsRequest deleteRequest,
Exception ex) {
LOG.debug("Retrying on error during bulk delete", ex);
if (isThrottleException(ex)) {
onDeleteThrottled(deleteRequest);
} else if (isSymptomOfBrokenConnection(ex)) {
// this is one which surfaces when an HTTPS connection is broken while
// the service is reading the result.
// it is treated as a throttle event for statistics
LOG.warn("Bulk delete operation interrupted: {}", ex.getMessage());
onDeleteThrottled(deleteRequest);
} else {
incrementStatistic(IGNORED_ERRORS);
}
}
/**
* Handle a delete throttling event.
* @param deleteRequest request which failed.
*/
private void onDeleteThrottled(final DeleteObjectsRequest deleteRequest) {
final List<ObjectIdentifier> keys = deleteRequest.delete().objects();
final int size = keys.size();
incrementStatistic(STORE_IO_THROTTLED, size);
instrumentation.addValueToQuantiles(STORE_IO_THROTTLE_RATE, size);
THROTTLE_LOG.info(
"Bulk delete {} keys throttled -first key = {}; last = {}",
size,
keys.get(0).key(),
keys.get(size - 1).key());
}
/**
* Does this error indicate that the connection was ultimately broken while
* the XML Response was parsed? As this seems a symptom of the far end
* blocking the response (i.e. server-side throttling) while
* the client eventually times out.
* @param ex exception received.
* @return true if this exception is considered a sign of a broken connection.
*/
private boolean isSymptomOfBrokenConnection(final Exception ex) {
return ex instanceof AWSClientIOException
&& ex.getCause() instanceof SdkClientException
&& ex.getMessage().contains(XML_PARSE_BROKEN);
}
}
|
BulkDeleteRetryHandler
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/node/NullNodeTest.java
|
{
"start": 306,
"end": 553
}
|
class ____ {
ObjectNode _object;
ArrayNode _array;
public void setObject(ObjectNode n) { _object = n; }
public void setArray(ArrayNode n) { _array = n; }
}
@SuppressWarnings("serial")
static
|
CovarianceBean
|
java
|
elastic__elasticsearch
|
x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MockClusterAlertScriptEngine.java
|
{
"start": 2813,
"end": 3598
}
|
class ____ implements WatcherConditionScript.Factory {
private final MockDeterministicScript script;
MockWatcherConditionScript(MockDeterministicScript script) {
this.script = script;
}
@Override
public WatcherConditionScript newInstance(Map<String, Object> params, WatchExecutionContext watcherContext) {
return new WatcherConditionScript(params, watcherContext) {
@Override
public boolean execute() {
return (boolean) script.apply(getParams());
}
};
}
}
/**
* A mock watcher transformation script that performs a given function instead of whatever the source provided was.
*/
static
|
MockWatcherConditionScript
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/YieldingOperator.java
|
{
"start": 1078,
"end": 1316
}
|
interface ____ not needed when using {@link StreamOperatorFactory} or {@link
* AbstractStreamOperatorV2} as those have access to the {@link MailboxExecutor} via {@link
* StreamOperatorParameters#getMailboxExecutor()}
*/
@Internal
public
|
is
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/dev/RuntimeUpdatesProcessor.java
|
{
"start": 62846,
"end": 64066
}
|
class ____ based restart
lastStartIndex = null;
}
public static void setLastStartIndex(IndexView lastStartIndex) {
RuntimeUpdatesProcessor.lastStartIndex = lastStartIndex;
}
@Override
public void close() throws IOException {
compiler.close();
if (testClassChangeWatcher != null) {
testClassChangeWatcher.close();
}
if (testClassChangeTimer != null) {
testClassChangeTimer.cancel();
}
}
public boolean toggleInstrumentation() {
instrumentationEnabled = !instrumentationEnabled();
if (instrumentationEnabled) {
log.info("Instrumentation based restart enabled");
} else {
log.info("Instrumentation based restart disabled");
}
return instrumentationEnabled;
}
public boolean toggleLiveReloadEnabled() {
liveReloadEnabled = !liveReloadEnabled;
if (liveReloadEnabled) {
log.info("Live reload enabled");
} else {
log.info("Live reload disabled");
}
return liveReloadEnabled;
}
public boolean isLiveReloadEnabled() {
return liveReloadEnabled;
}
static
|
loader
|
java
|
quarkusio__quarkus
|
extensions/smallrye-openapi/deployment/src/test/java/io/quarkus/smallrye/openapi/test/jaxrs/OpenApiResourceAuthenticatedAtClassLevel.java
|
{
"start": 448,
"end": 946
}
|
class ____ {
private ResourceBean2 resourceBean;
@GET
@Path("/test-security/classLevel/1")
public String secureEndpoint1() {
return "secret";
}
@GET
@Path("/test-security/classLevel/2")
public String secureEndpoint2() {
return "secret";
}
@GET
@Path("/test-security/classLevel/3")
@SecurityRequirement(name = "MyOwnName")
public String secureEndpoint3() {
return "secret";
}
}
|
OpenApiResourceAuthenticatedAtClassLevel
|
java
|
quarkusio__quarkus
|
extensions/hibernate-validator/runtime/src/main/java/io/quarkus/hibernate/validator/runtime/jaxrs/ValidatorMediaTypeUtil.java
|
{
"start": 225,
"end": 2104
}
|
class ____ {
static final List<MediaType> SUPPORTED_MEDIA_TYPES = Arrays.asList(
MediaType.APPLICATION_JSON_TYPE,
MediaType.APPLICATION_XML_TYPE,
MediaType.TEXT_XML_TYPE,
MediaType.TEXT_PLAIN_TYPE);
private ValidatorMediaTypeUtil() {
}
/**
* Look up the right media type taking into account the HTTP request and the media types defined in the `@Produces`
* annotation.
*
* @param mediaTypesFromRequest list of media types in the HTTP request.
* @param mediaTypesFromProducesAnnotation list of media types set in the `@Produces` annotation.
* @return one supported media type from either the HTTP request or the annotation.
*/
public static MediaType getAcceptMediaType(List<MediaType> mediaTypesFromRequest,
List<MediaType> mediaTypesFromProducesAnnotation) {
for (MediaType mediaType : mediaTypesFromRequest) {
// It's supported and is included in the `@Produces` annotation
if (isMediaTypeInList(mediaType, SUPPORTED_MEDIA_TYPES)
&& isMediaTypeInList(mediaType, mediaTypesFromProducesAnnotation)) {
return mediaType;
}
}
// if none is found, then return the first from the annotation or empty if no produces annotation
if (mediaTypesFromProducesAnnotation.isEmpty()) {
return null;
}
return mediaTypesFromProducesAnnotation.get(0);
}
private static boolean isMediaTypeInList(MediaType mediaType, List<MediaType> list) {
for (MediaType item : list) {
if (mediaType.getType().equalsIgnoreCase(item.getType())
&& mediaType.getSubtype().equalsIgnoreCase(item.getSubtype())) {
return true;
}
}
return false;
}
}
|
ValidatorMediaTypeUtil
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/AbstractSystemMetricsPublisher.java
|
{
"start": 3444,
"end": 4189
}
|
class ____ implements EventHandler<Event> {
@Override
public void handle(Event event) {
// Use hashCode (of ApplicationId) to dispatch the event to the child
// dispatcher, such that all the writing events of one application will
// be handled by one thread, the scheduled order of the these events
// will be preserved
int index = (event.hashCode() & Integer.MAX_VALUE) % dispatchers.size();
dispatchers.get(index).getEventHandler().handle(event);
}
}
protected AsyncDispatcher createDispatcher() {
return new AsyncDispatcher("RM Timeline dispatcher");
}
}
/**
* EventType which is used while publishing the events.
*/
protected
|
CompositEventHandler
|
java
|
micronaut-projects__micronaut-core
|
aop/src/main/java/io/micronaut/aop/chain/MethodInterceptorChain.java
|
{
"start": 1838,
"end": 2113
}
|
class ____ {@link MethodInvocationContext} and is
* consumed by the framework itself and should not be used directly in application code.
*
* @param <T> type
* @param <R> result
* @author Graeme Rocher
* @since 1.0
*/
@Internal
@UsedByGeneratedCode
public final
|
implements
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/web/reactive/server/WebTestClient.java
|
{
"start": 30718,
"end": 34457
}
|
interface ____ {
/**
* Apply multiple assertions to a response with the given
* {@linkplain ResponseSpecConsumer consumers}, with the guarantee that
* all assertions will be applied even if one or more assertions fails
* with an exception.
* <p>If a single {@link Error} or {@link RuntimeException} is thrown,
* it will be rethrown.
* <p>If multiple exceptions are thrown, this method will throw an
* {@link AssertionError} whose error message is a summary of all the
* exceptions. In addition, each exception will be added as a
* {@linkplain Throwable#addSuppressed(Throwable) suppressed exception} to
* the {@code AssertionError}.
* <p>This feature is similar to the {@code SoftAssertions} support in
* AssertJ and the {@code assertAll()} support in JUnit Jupiter.
*
* <h4>Example</h4>
* <pre class="code">
* webTestClient.get().uri("/hello").exchange()
* .expectAll(
* responseSpec -> responseSpec.expectStatus().isOk(),
* responseSpec -> responseSpec.expectBody(String.class).isEqualTo("Hello, World!")
* );
* </pre>
* @param consumers the list of {@code ResponseSpec} consumers
* @since 5.3.10
*/
ResponseSpec expectAll(ResponseSpecConsumer... consumers);
/**
* Assertions on the response status.
*/
StatusAssertions expectStatus();
/**
* Assertions on the headers of the response.
*/
HeaderAssertions expectHeader();
/**
* Assertions on the cookies of the response.
* @since 5.3
*/
CookieAssertions expectCookie();
/**
* Consume and decode the response body to a single object of type
* {@code <B>} and then apply assertions.
* @param bodyType the expected body type
*/
<B> BodySpec<B, ?> expectBody(Class<B> bodyType);
/**
* Alternative to {@link #expectBody(Class)} that accepts information
* about a target type with generics.
*/
<B> BodySpec<B, ?> expectBody(ParameterizedTypeReference<B> bodyType);
/**
* Consume and decode the response body to {@code List<E>} and then apply
* List-specific assertions.
* @param elementType the expected List element type
*/
<E> ListBodySpec<E> expectBodyList(Class<E> elementType);
/**
* Alternative to {@link #expectBodyList(Class)} that accepts information
* about a target type with generics.
*/
<E> ListBodySpec<E> expectBodyList(ParameterizedTypeReference<E> elementType);
/**
* Consume and decode the response body to {@code byte[]} and then apply
* assertions on the raw content (for example, isEmpty, JSONPath, etc.).
*/
BodyContentSpec expectBody();
/**
* Return an {@link ExchangeResult} with the raw content. Effectively, a shortcut for:
* <pre class="code">
* .returnResult(byte[].class)
* </pre>
* @since 7.0
*/
default ExchangeResult returnResult() {
return returnResult(byte[].class);
}
/**
* Exit the chained flow in order to consume the response body externally,
* for example, via {@link reactor.test.StepVerifier}.
* <p>Note that when {@code Void.class} is passed in, the response body
* is consumed and released. If no content is expected, then consider
* using {@code .expectBody().isEmpty()} instead which asserts that
* there is no content.
*/
<T> FluxExchangeResult<T> returnResult(Class<T> elementClass);
/**
* Alternative to {@link #returnResult(Class)} that accepts information
* about a target type with generics.
*/
<T> FluxExchangeResult<T> returnResult(ParameterizedTypeReference<T> elementTypeRef);
/**
* {@link Consumer} of a {@link ResponseSpec}.
* @since 5.3.10
* @see ResponseSpec#expectAll(ResponseSpecConsumer...)
*/
@FunctionalInterface
|
ResponseSpec
|
java
|
elastic__elasticsearch
|
client/rest/src/main/java/org/elasticsearch/client/RestClient.java
|
{
"start": 42378,
"end": 42653
}
|
class ____ extends ByteArrayOutputStream {
ByteArrayInputOutputStream(int size) {
super(size);
}
public InputStream asInput() {
return new ByteArrayInputStream(this.buf, 0, this.count);
}
}
}
|
ByteArrayInputOutputStream
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableUnsubscribeOnTest.java
|
{
"start": 6235,
"end": 9571
}
|
class ____ extends Scheduler {
private final Scheduler eventLoop;
private volatile Thread t;
public UIEventLoopScheduler() {
eventLoop = Schedulers.single();
/*
* DON'T DO THIS IN PRODUCTION CODE
*/
final CountDownLatch latch = new CountDownLatch(1);
eventLoop.scheduleDirect(new Runnable() {
@Override
public void run() {
t = Thread.currentThread();
latch.countDown();
}
});
try {
latch.await();
} catch (InterruptedException e) {
throw new RuntimeException("failed to initialize and get inner thread");
}
}
@NonNull
@Override
public Worker createWorker() {
return eventLoop.createWorker();
}
public Thread getThread() {
return t;
}
}
@Test
public void takeHalf() {
int elements = 1024;
Flowable.range(0, elements * 2).unsubscribeOn(Schedulers.single())
.take(elements)
.to(TestHelper.<Integer>testConsumer())
.awaitDone(5, TimeUnit.SECONDS)
.assertValueCount(elements)
.assertComplete()
.assertNoErrors()
.assertSubscribed();
}
@Test
public void dispose() {
TestHelper.checkDisposed(Flowable.just(1).unsubscribeOn(Schedulers.single()));
}
@Test
public void normal() {
final int[] calls = { 0 };
Flowable.just(1)
.doOnCancel(new Action() {
@Override
public void run() throws Exception {
calls[0]++;
}
})
.unsubscribeOn(Schedulers.single())
.test()
.assertResult(1);
assertEquals(0, calls[0]);
}
@Test
public void error() {
final int[] calls = { 0 };
Flowable.error(new TestException())
.doOnCancel(new Action() {
@Override
public void run() throws Exception {
calls[0]++;
}
})
.unsubscribeOn(Schedulers.single())
.test()
.assertFailure(TestException.class);
assertEquals(0, calls[0]);
}
@Test
public void signalAfterDispose() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new Flowable<Integer>() {
@Override
protected void subscribeActual(Subscriber<? super Integer> subscriber) {
subscriber.onSubscribe(new BooleanSubscription());
subscriber.onNext(1);
subscriber.onNext(2);
subscriber.onError(new TestException());
subscriber.onComplete();
}
}
.unsubscribeOn(Schedulers.single())
.take(1)
.test()
.assertResult(1);
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeFlowable(f -> f.unsubscribeOn(ImmediateThinScheduler.INSTANCE));
}
}
|
UIEventLoopScheduler
|
java
|
alibaba__nacos
|
api/src/main/java/com/alibaba/nacos/api/grpc/auto/RequestGrpc.java
|
{
"start": 1202,
"end": 3889
}
|
class ____ {
private RequestGrpc() {}
public static final String SERVICE_NAME = "Request";
// Static method descriptors that strictly reflect the proto.
private static volatile io.grpc.MethodDescriptor<com.alibaba.nacos.api.grpc.auto.Payload,
com.alibaba.nacos.api.grpc.auto.Payload> getRequestMethod;
@io.grpc.stub.annotations.RpcMethod(
fullMethodName = SERVICE_NAME + '/' + "request",
requestType = com.alibaba.nacos.api.grpc.auto.Payload.class,
responseType = com.alibaba.nacos.api.grpc.auto.Payload.class,
methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
public static io.grpc.MethodDescriptor<com.alibaba.nacos.api.grpc.auto.Payload,
com.alibaba.nacos.api.grpc.auto.Payload> getRequestMethod() {
io.grpc.MethodDescriptor<com.alibaba.nacos.api.grpc.auto.Payload, com.alibaba.nacos.api.grpc.auto.Payload> getRequestMethod;
if ((getRequestMethod = RequestGrpc.getRequestMethod) == null) {
synchronized (RequestGrpc.class) {
if ((getRequestMethod = RequestGrpc.getRequestMethod) == null) {
RequestGrpc.getRequestMethod = getRequestMethod =
io.grpc.MethodDescriptor.<com.alibaba.nacos.api.grpc.auto.Payload, com.alibaba.nacos.api.grpc.auto.Payload>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.UNARY)
.setFullMethodName(generateFullMethodName(
"Request", "request"))
.setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
com.alibaba.nacos.api.grpc.auto.Payload.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
com.alibaba.nacos.api.grpc.auto.Payload.getDefaultInstance()))
.setSchemaDescriptor(new RequestMethodDescriptorSupplier("request"))
.build();
}
}
}
return getRequestMethod;
}
/**
* Creates a new async stub that supports all call types for the service
*/
public static RequestStub newStub(io.grpc.Channel channel) {
return new RequestStub(channel);
}
/**
* Creates a new blocking-style stub that supports unary and streaming output calls on the service
*/
public static RequestBlockingStub newBlockingStub(
io.grpc.Channel channel) {
return new RequestBlockingStub(channel);
}
/**
* Creates a new ListenableFuture-style stub that supports unary calls on the service
*/
public static RequestFutureStub newFutureStub(
io.grpc.Channel channel) {
return new RequestFutureStub(channel);
}
/**
*/
public static abstract
|
RequestGrpc
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/BoxEndpointBuilderFactory.java
|
{
"start": 8377,
"end": 15865
}
|
interface ____
extends
EndpointConsumerBuilder {
default BoxEndpointConsumerBuilder basic() {
return (BoxEndpointConsumerBuilder) this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedBoxEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedBoxEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedBoxEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedBoxEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Custom HTTP params for settings like proxy host.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*
* @param httpParams the value to set
* @return the dsl builder
*/
default AdvancedBoxEndpointConsumerBuilder httpParams(Map<java.lang.String, java.lang.Object> httpParams) {
doSetProperty("httpParams", httpParams);
return this;
}
/**
* Custom HTTP params for settings like proxy host.
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*
* @param httpParams the value to set
* @return the dsl builder
*/
default AdvancedBoxEndpointConsumerBuilder httpParams(String httpParams) {
doSetProperty("httpParams", httpParams);
return this;
}
/**
* Custom Access Token Cache for storing and retrieving access tokens.
*
* The option is a: <code>com.box.sdk.IAccessTokenCache</code> type.
*
* Group: security
*
* @param accessTokenCache the value to set
* @return the dsl builder
*/
default AdvancedBoxEndpointConsumerBuilder accessTokenCache(com.box.sdk.IAccessTokenCache accessTokenCache) {
doSetProperty("accessTokenCache", accessTokenCache);
return this;
}
/**
* Custom Access Token Cache for storing and retrieving access tokens.
*
* The option will be converted to a
* <code>com.box.sdk.IAccessTokenCache</code> type.
*
* Group: security
*
* @param accessTokenCache the value to set
* @return the dsl builder
*/
default AdvancedBoxEndpointConsumerBuilder accessTokenCache(String accessTokenCache) {
doSetProperty("accessTokenCache", accessTokenCache);
return this;
}
/**
* The type of encryption algorithm for JWT. Supported Algorithms:
* RSA_SHA_256 RSA_SHA_384 RSA_SHA_512.
*
* The option is a: <code>com.box.sdk.EncryptionAlgorithm</code> type.
*
* Default: RSA_SHA_256
* Group: security
*
* @param encryptionAlgorithm the value to set
* @return the dsl builder
*/
default AdvancedBoxEndpointConsumerBuilder encryptionAlgorithm(com.box.sdk.EncryptionAlgorithm encryptionAlgorithm) {
doSetProperty("encryptionAlgorithm", encryptionAlgorithm);
return this;
}
/**
* The type of encryption algorithm for JWT. Supported Algorithms:
* RSA_SHA_256 RSA_SHA_384 RSA_SHA_512.
*
* The option will be converted to a
* <code>com.box.sdk.EncryptionAlgorithm</code> type.
*
* Default: RSA_SHA_256
* Group: security
*
* @param encryptionAlgorithm the value to set
* @return the dsl builder
*/
default AdvancedBoxEndpointConsumerBuilder encryptionAlgorithm(String encryptionAlgorithm) {
doSetProperty("encryptionAlgorithm", encryptionAlgorithm);
return this;
}
/**
* The maximum number of access tokens in cache.
*
* The option is a: <code>int</code> type.
*
* Default: 100
* Group: security
*
* @param maxCacheEntries the value to set
* @return the dsl builder
*/
default AdvancedBoxEndpointConsumerBuilder maxCacheEntries(int maxCacheEntries) {
doSetProperty("maxCacheEntries", maxCacheEntries);
return this;
}
/**
* The maximum number of access tokens in cache.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 100
* Group: security
*
* @param maxCacheEntries the value to set
* @return the dsl builder
*/
default AdvancedBoxEndpointConsumerBuilder maxCacheEntries(String maxCacheEntries) {
doSetProperty("maxCacheEntries", maxCacheEntries);
return this;
}
}
/**
* Builder for endpoint producers for the Box component.
*/
public
|
AdvancedBoxEndpointConsumerBuilder
|
java
|
quarkusio__quarkus
|
core/processor/src/main/java/io/quarkus/annotation/processor/documentation/config/scanner/ConfigAnnotationScanner.java
|
{
"start": 1804,
"end": 9318
}
|
class ____ {
private final Utils utils;
private final Config config;
private final ConfigCollector configCollector;
private final Set<String> configGroupClassNames = new HashSet<>();
private final Set<String> configRootClassNames = new HashSet<>();
private final Set<String> configMappingWithoutConfigRootClassNames = new HashSet<>();
private final Set<String> enumClassNames = new HashSet<>();
private final List<ConfigAnnotationListener> configRootListeners;
/**
* These are handled specifically as we just want to collect the javadoc.
* They are actually consumed as super interfaces in a config root.
*/
private final List<ConfigAnnotationListener> configMappingWithoutConfigRootListeners;
public ConfigAnnotationScanner(Config config, Utils utils) {
this.config = config;
this.utils = utils;
this.configCollector = new ConfigCollector();
List<ConfigAnnotationListener> configRootListeners = new ArrayList<>();
List<ConfigAnnotationListener> configMappingWithoutConfigRootListeners = new ArrayList<>();
configRootListeners.add(new JavadocConfigMappingListener(config, utils, configCollector));
configRootListeners.add(new ConfigMappingListener(config, utils, configCollector));
configMappingWithoutConfigRootListeners.add(new JavadocConfigMappingListener(config, utils, configCollector));
this.configRootListeners = Collections.unmodifiableList(configRootListeners);
this.configMappingWithoutConfigRootListeners = Collections.unmodifiableList(configMappingWithoutConfigRootListeners);
}
public void scanConfigGroups(RoundEnvironment roundEnv, TypeElement annotation) {
for (TypeElement configGroup : ElementFilter.typesIn(roundEnv.getElementsAnnotatedWith(annotation))) {
if (isConfigGroupAlreadyHandled(configGroup)) {
continue;
}
debug("Detected annotated config group: " + configGroup, configGroup);
try {
DiscoveryConfigGroup discoveryConfigGroup = applyRootListeners(l -> l.onConfigGroup(configGroup));
scanElement(configRootListeners, discoveryConfigGroup, configGroup);
} catch (Exception e) {
throw new IllegalStateException("Unable to scan config group: " + configGroup, e);
}
}
}
public void scanConfigRoots(RoundEnvironment roundEnv, TypeElement annotation) {
for (TypeElement configRoot : typesIn(roundEnv.getElementsAnnotatedWith(annotation))) {
checkConfigRootAnnotationConsistency(configRoot);
final PackageElement pkg = utils.element().getPackageOf(configRoot);
if (pkg == null) {
utils.processingEnv().getMessager().printMessage(Diagnostic.Kind.ERROR,
"Element " + configRoot + " has no enclosing package");
continue;
}
if (isConfigRootAlreadyHandled(configRoot)) {
continue;
}
debug("Detected config root: " + configRoot, configRoot);
try {
DiscoveryConfigRoot discoveryConfigRoot = applyRootListeners(l -> l.onConfigRoot(configRoot));
scanElement(configRootListeners, discoveryConfigRoot, configRoot);
} catch (Exception e) {
throw new IllegalStateException("Unable to scan config root: " + configRoot, e);
}
}
}
/**
* In this case, we will just apply the Javadoc listeners to collect Javadoc.
*/
public void scanConfigMappingsWithoutConfigRoot(RoundEnvironment roundEnv, TypeElement annotation) {
for (TypeElement configMappingWithoutConfigRoot : typesIn(roundEnv.getElementsAnnotatedWith(annotation))) {
if (utils.element().isAnnotationPresent(configMappingWithoutConfigRoot, Types.ANNOTATION_CONFIG_ROOT)) {
continue;
}
final PackageElement pkg = utils.element().getPackageOf(configMappingWithoutConfigRoot);
if (pkg == null) {
utils.processingEnv().getMessager().printMessage(Diagnostic.Kind.ERROR,
"Element " + configMappingWithoutConfigRoot + " has no enclosing package");
continue;
}
if (isConfigMappingWithoutConfigRootAlreadyHandled(configMappingWithoutConfigRoot)) {
continue;
}
debug("Detected config mapping without config root: " + configMappingWithoutConfigRoot,
configMappingWithoutConfigRoot);
try {
// we need to forge a dummy DiscoveryConfigRoot
// it's mostly ignored in the listeners, except for checking if it's a config mapping (for mixed modules)
DiscoveryConfigRoot discoveryConfigRoot = new DiscoveryConfigRoot(config.getExtension(), "dummy", "dummy",
utils.element().getBinaryName(configMappingWithoutConfigRoot),
configMappingWithoutConfigRoot.getQualifiedName().toString(),
ConfigPhase.BUILD_TIME, null, true);
scanElement(configMappingWithoutConfigRootListeners, discoveryConfigRoot, configMappingWithoutConfigRoot);
} catch (Exception e) {
throw new IllegalStateException(
"Unable to scan config mapping without config root: " + configMappingWithoutConfigRoot, e);
}
}
}
public ConfigCollector finalizeProcessing() {
applyListeners(configRootListeners, l -> l.finalizeProcessing());
applyListeners(configMappingWithoutConfigRootListeners, l -> l.finalizeProcessing());
return configCollector;
}
private void scanElement(List<ConfigAnnotationListener> listeners, DiscoveryRootElement configRootElement,
TypeElement clazz) {
// we scan the superclass and interfaces first so that the local elements can potentially override them
if (clazz.getKind() == ElementKind.INTERFACE) {
List<? extends TypeMirror> superInterfaces = clazz.getInterfaces();
for (TypeMirror superInterface : superInterfaces) {
TypeElement superInterfaceTypeElement = (TypeElement) ((DeclaredType) superInterface).asElement();
debug("Detected superinterface: " + superInterfaceTypeElement, clazz);
applyListeners(listeners, l -> l.onInterface(configRootElement, superInterfaceTypeElement));
scanElement(listeners, configRootElement, superInterfaceTypeElement);
}
} else {
TypeMirror superclass = clazz.getSuperclass();
if (superclass.getKind() != TypeKind.NONE
&& !utils.element().getQualifiedName(superclass).equals(Object.class.getName())) {
TypeElement superclassTypeElement = (TypeElement) ((DeclaredType) superclass).asElement();
debug("Detected superclass: " + superclassTypeElement, clazz);
applyListeners(listeners, l -> l.onSuperclass(configRootElement, clazz));
scanElement(listeners, configRootElement, superclassTypeElement);
}
}
for (Element e : clazz.getEnclosedElements()) {
switch (e.getKind()) {
case INTERFACE: {
// We don't need to catch the enclosed
|
ConfigAnnotationScanner
|
java
|
apache__camel
|
components/camel-attachments/src/test/java/org/apache/camel/attachment/MessageWithAttachmentRedeliveryIssueTest.java
|
{
"start": 1404,
"end": 3306
}
|
class ____ extends CamelTestSupport {
@Test
void testMessageWithAttachmentRedeliveryIssue() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(1);
template.send("direct:start", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setBody("Hello World");
exchange.getIn(AttachmentMessage.class).addAttachment("message1.xml",
new DataHandler(new FileDataSource(new File("src/test/data/message1.xml"))));
exchange.getIn(AttachmentMessage.class).addAttachmentObject("message2.xml",
new DefaultAttachment(new FileDataSource(new File("src/test/data/message2.xml"))));
}
});
MockEndpoint.assertIsSatisfied(context);
AttachmentMessage msg = getMockEndpoint("mock:result").getReceivedExchanges().get(0).getIn(AttachmentMessage.class);
assertNotNull(msg);
assertEquals("Hello World", msg.getBody());
assertTrue(msg.hasAttachments());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
onException(Exception.class).maximumRedeliveries(3).redeliveryDelay(0);
from("direct:start")
.process(new Processor() {
private int counter;
@Override
public void process(Exchange exchange) {
if (counter++ < 2) {
throw new IllegalArgumentException("Forced");
}
}
}).to("mock:result");
}
};
}
}
|
MessageWithAttachmentRedeliveryIssueTest
|
java
|
google__guava
|
android/guava-testlib/test/com/google/common/testing/anotherpackage/ForwardingWrapperTesterTest.java
|
{
"start": 11910,
"end": 13243
}
|
class ____ implements ParameterTypesDifferent {
private final ParameterTypesDifferent delegate;
ParameterTypesDifferentForwarder(ParameterTypesDifferent delegate) {
this.delegate = delegate;
}
@Override
public void foo(
String s,
Runnable r,
Number n,
Iterable<?> it,
boolean b,
Equivalence<String> eq,
Exception e,
InputStream in,
Comparable<?> c,
Ordering<Integer> ord,
Charset charset,
TimeUnit unit,
Class<?> cls,
Joiner joiner,
Pattern pattern,
UnsignedInteger ui,
UnsignedLong ul,
StringBuilder sb,
Predicate<?> pred,
Function<?, ?> func,
Object obj) {
delegate.foo(
s, r, n, it, b, eq, e, in, c, ord, charset, unit, cls, joiner, pattern, ui, ul, sb, pred,
func, obj);
}
@Override
public String toString() {
return delegate.toString();
}
}
public void testCovariantReturn() {
new ForwardingWrapperTester()
.testForwarding(
Sub.class,
new Function<Sub, Sub>() {
@Override
public Sub apply(Sub sub) {
return new ForwardingSub(sub);
}
});
}
|
ParameterTypesDifferentForwarder
|
java
|
spring-projects__spring-security
|
webauthn/src/main/java/org/springframework/security/web/webauthn/api/ImmutablePublicKeyCredentialUserEntity.java
|
{
"start": 5743,
"end": 7070
}
|
class ____ {
@SuppressWarnings("NullAway.Init")
private String name;
@SuppressWarnings("NullAway.Init")
private Bytes id;
private @Nullable String displayName;
private PublicKeyCredentialUserEntityBuilder() {
}
/**
* Sets the {@link #getName()} property.
* @param name the name
* @return the {@link PublicKeyCredentialUserEntityBuilder}
*/
public PublicKeyCredentialUserEntityBuilder name(String name) {
this.name = name;
return this;
}
/**
* Sets the {@link #getId()} property.
* @param id the id
* @return the {@link PublicKeyCredentialUserEntityBuilder}
*/
public PublicKeyCredentialUserEntityBuilder id(Bytes id) {
this.id = id;
return this;
}
/**
* Sets the {@link #getDisplayName()} property.
* @param displayName the display name
* @return the {@link PublicKeyCredentialUserEntityBuilder}
*/
public PublicKeyCredentialUserEntityBuilder displayName(String displayName) {
this.displayName = displayName;
return this;
}
/**
* Builds a new {@link PublicKeyCredentialUserEntity}
* @return a new {@link PublicKeyCredentialUserEntity}
*/
public PublicKeyCredentialUserEntity build() {
return new ImmutablePublicKeyCredentialUserEntity(this.name, this.id, this.displayName);
}
}
}
|
PublicKeyCredentialUserEntityBuilder
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-spring/src/test/java/org/apache/dubbo/config/spring/reference/javaconfig/JavaConfigReferenceBeanTest.java
|
{
"start": 21048,
"end": 21537
}
|
class ____ {
// The 'interfaceClass' or 'interfaceName' attribute value of @DubboReference annotation is inconsistent with
// the generic type of the ReferenceBean returned by the bean method.
@Bean
@DubboReference(group = "${myapp.group}", interfaceClass = DemoService.class)
public ReferenceBean<HelloService> helloService() {
return new ReferenceBean();
}
}
@Configuration
public static
|
InconsistentBeanConfiguration
|
java
|
quarkusio__quarkus
|
extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/typesafe/Monks.java
|
{
"start": 200,
"end": 317
}
|
class ____ {
static native TemplateInstance monk(String name);
}
@CheckedTemplate
static
|
Templates
|
java
|
apache__flink
|
flink-streaming-java/src/test/java/org/apache/flink/streaming/api/operators/AbstractUdfStreamOperatorLifecycleTest.java
|
{
"start": 5487,
"end": 9969
}
|
interface ____.apache.flink.api.common.functions.OpenContext], setRuntimeContext[interface "
+ "org.apache.flink.api.common.functions.RuntimeContext]]";
private static final List<String> ACTUAL_ORDER_TRACKING =
Collections.synchronizedList(new ArrayList<String>(EXPECTED_CALL_ORDER_FULL.size()));
@Test
void testAllMethodsRegisteredInTest() {
List<String> methodsWithSignatureString = new ArrayList<>();
for (Method method : StreamOperator.class.getMethods()) {
methodsWithSignatureString.add(
method.getName() + Arrays.toString(method.getParameterTypes()));
}
Collections.sort(methodsWithSignatureString);
assertThat(methodsWithSignatureString)
.as(
"It seems like new methods have been introduced to "
+ StreamOperator.class
+ ". Please register them with this test and ensure to document their position in the lifecycle "
+ "(if applicable).")
.hasToString(ALL_METHODS_STREAM_OPERATOR);
methodsWithSignatureString = new ArrayList<>();
for (Method method : RichFunction.class.getMethods()) {
methodsWithSignatureString.add(
method.getName() + Arrays.toString(method.getParameterTypes()));
}
Collections.sort(methodsWithSignatureString);
assertThat(methodsWithSignatureString)
.as(
"It seems like new methods have been introduced to "
+ RichFunction.class
+ ". Please register them with this test and ensure to document their position in the lifecycle "
+ "(if applicable).")
.hasToString(ALL_METHODS_RICH_FUNCTION);
}
@Test
void testLifeCycleFull() throws Exception {
ACTUAL_ORDER_TRACKING.clear();
Configuration taskManagerConfig = new Configuration();
StreamConfig cfg = new StreamConfig(new Configuration());
MockSourceFunction srcFun = new MockSourceFunction();
cfg.setStreamOperator(new LifecycleTrackingStreamSource<>(srcFun, true));
cfg.setOperatorID(new OperatorID());
try (ShuffleEnvironment shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build()) {
Task task =
StreamTaskTest.createTask(
SourceStreamTask.class,
shuffleEnvironment,
cfg,
taskManagerConfig,
EXECUTOR_RESOURCE.getExecutor());
task.startTaskThread();
LifecycleTrackingStreamSource.runStarted.await();
// wait for clean termination
task.getExecutingThread().join();
assertThat(task.getExecutionState()).isEqualTo(ExecutionState.FINISHED);
assertThat(ACTUAL_ORDER_TRACKING).isEqualTo(EXPECTED_CALL_ORDER_FULL);
}
}
@Test
void testLifeCycleCancel() throws Exception {
ACTUAL_ORDER_TRACKING.clear();
Configuration taskManagerConfig = new Configuration();
StreamConfig cfg = new StreamConfig(new Configuration());
MockSourceFunction srcFun = new MockSourceFunction();
cfg.setStreamOperator(new LifecycleTrackingStreamSource<>(srcFun, false));
cfg.setOperatorID(new OperatorID());
try (ShuffleEnvironment shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build()) {
Task task =
StreamTaskTest.createTask(
SourceStreamTask.class,
shuffleEnvironment,
cfg,
taskManagerConfig,
EXECUTOR_RESOURCE.getExecutor());
task.startTaskThread();
LifecycleTrackingStreamSource.runStarted.await();
// this should cancel the task even though it is blocked on runFinished
task.cancelExecution();
// wait for clean termination
task.getExecutingThread().join();
assertThat(task.getExecutionState()).isEqualTo(ExecutionState.CANCELED);
assertThat(ACTUAL_ORDER_TRACKING).isEqualTo(EXPECTED_CALL_ORDER_CANCEL_RUNNING);
}
}
private static
|
org
|
java
|
apache__flink
|
flink-test-utils-parent/flink-test-utils/src/test/java/org/apache/flink/types/PojoTestUtilsTest.java
|
{
"start": 2428,
"end": 2530
}
|
class ____ {
public List<?> x;
}
@TypeInfo(FooFactory.class)
public
|
PojoRequiringKryo
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ClassNameTest.java
|
{
"start": 3478,
"end": 3793
}
|
interface ____ {}
""")
.doTest();
}
@Test
public void negativeIsPublic() {
compilationHelper
.addSourceLines(
"b/B.java",
"""
package b;
// BUG: Diagnostic contains: should be declared in a file named Test.java
public
|
B
|
java
|
apache__camel
|
core/camel-management/src/main/java/org/apache/camel/management/mbean/ManagedProducer.java
|
{
"start": 1101,
"end": 1752
}
|
class ____ extends ManagedService implements ManagedProducerMBean {
private final Producer producer;
public ManagedProducer(CamelContext context, Producer producer) {
super(context, producer);
this.producer = producer;
}
public Producer getProducer() {
return producer;
}
@Override
public String getEndpointUri() {
return producer.getEndpoint().getEndpointUri();
}
@Override
public boolean isSingleton() {
return producer.isSingleton();
}
@Override
public boolean isRemoteEndpoint() {
return producer.getEndpoint().isRemote();
}
}
|
ManagedProducer
|
java
|
spring-projects__spring-framework
|
spring-messaging/src/main/java/org/springframework/messaging/simp/annotation/support/SendToMethodReturnValueHandler.java
|
{
"start": 10214,
"end": 11278
}
|
class ____ {
private final PlaceholderResolver placeholderResolver;
private final @Nullable SendTo sendTo;
private final @Nullable SendToUser sendToUser;
public DestinationHelper(MessageHeaders headers, @Nullable SendToUser sendToUser, @Nullable SendTo sendTo) {
Map<String, String> variables = getTemplateVariables(headers);
this.placeholderResolver = variables::get;
this.sendTo = sendTo;
this.sendToUser = sendToUser;
}
@SuppressWarnings("unchecked")
private Map<String, String> getTemplateVariables(MessageHeaders headers) {
String name = DestinationVariableMethodArgumentResolver.DESTINATION_TEMPLATE_VARIABLES_HEADER;
return (Map<String, String>) headers.getOrDefault(name, Collections.emptyMap());
}
public @Nullable SendTo getSendTo() {
return this.sendTo;
}
public @Nullable SendToUser getSendToUser() {
return this.sendToUser;
}
public String expandTemplateVars(String destination) {
return placeholderHelper.replacePlaceholders(destination, this.placeholderResolver);
}
}
}
|
DestinationHelper
|
java
|
apache__kafka
|
tools/src/main/java/org/apache/kafka/tools/AclCommand.java
|
{
"start": 34301,
"end": 35202
}
|
class ____ extends EnumConverter<PatternType> {
PatternTypeConverter() {
super(PatternType.class);
}
@Override
public PatternType convert(String value) {
PatternType patternType = super.convert(value);
if (patternType.isUnknown())
throw new ValueConversionException("Unknown resource-pattern-type: " + value);
return patternType;
}
@Override
public String valuePattern() {
List<PatternType> values = List.of(PatternType.values());
List<PatternType> filteredValues = values.stream()
.filter(type -> type != PatternType.UNKNOWN)
.toList();
return filteredValues.stream()
.map(Object::toString)
.collect(Collectors.joining("|"));
}
}
}
|
PatternTypeConverter
|
java
|
google__dagger
|
dagger-compiler/main/java/dagger/internal/codegen/writing/ComponentImplementation.java
|
{
"start": 40093,
"end": 52953
}
|
class ____ from a
// separate build unit (see https://github.com/google/dagger/issues/3401).
method.getParameters()
.forEach(
param ->
method.addStatement(
"%T.checkNotNull(%N)",
XTypeNames.DAGGER_PRECONDITIONS,
param.getName())); // SUPPRESS_GET_NAME_CHECK
method.addStatement(
"return %L",
XCodeBlock.ofNewInstance(
name(),
"%L",
Stream.concat(
creatorComponentFields().stream().map(field -> XCodeBlock.of("%N", field)),
method.getParameters().stream()
.map(param -> XCodeBlock.of("%N", param.getName()))) // SUPPRESS_GET_NAME_CHECK
.collect(toParametersCodeBlock())));
parent.get().getComponentShard().addMethod(COMPONENT_METHOD, method.build());
}
private void addInterfaceMethods() {
// Each component method may have been declared by several supertypes. We want to implement
// only one method for each distinct signature.
XType componentType = graph.componentTypeElement().getType();
Set<MethodSignature> methodDescriptors = new HashSet<>();
for (ComponentMethodDescriptor method : graph.entryPointMethods()) {
MethodSignature signature =
MethodSignature.forComponentMethod(method, componentType, processingEnv);
if (methodDescriptors.add(signature)) {
addMethod(
COMPONENT_METHOD,
componentRequestRepresentationsProvider.get().getComponentMethod(method));
}
}
}
private void addChildComponents() {
for (BindingGraph subgraph : graph.subgraphs()) {
topLevelImplementation()
.addType(
TypeSpecKind.COMPONENT_IMPL,
childComponentImplementationFactory.create(subgraph).generate());
}
}
private void addShards() {
// Generate all shards and add them to this component implementation.
for (ShardImplementation shard : ImmutableSet.copyOf(shardsByBinding.get().values())) {
if (shardFieldsByImplementation.containsKey(shard)) {
addField(FieldSpecKind.COMPONENT_SHARD_FIELD, shardFieldsByImplementation.get(shard));
topLevelImplementation().addType(TypeSpecKind.COMPONENT_SHARD_TYPE, shard.generate());
}
}
}
/** Creates and adds the constructor and methods needed for initializing the component. */
private void addConstructorAndInitializationMethods() {
XFunSpecs.Builder constructor = constructorBuilder();
// TODO(bcorso): remove once dagger.generatedClassExtendsComponent flag is removed.
if (!isShardClassPrivate()) {
constructor.addModifiers(PRIVATE);
}
ImmutableList<XParameterSpec> parameters = constructorParameters.values().asList();
// Add a constructor parameter and initialization for each component field. We initialize
// these fields immediately so that we don't need to be pass them to each initialize method
// and shard constructor.
componentFieldsByImplementation()
.forEach(
(componentImplementation, field) -> {
if (isComponentShard()
&& componentImplementation.equals(ComponentImplementation.this)) {
// For the self-referenced component field,
// just initialize it in the initializer.
addField(
FieldSpecKind.COMPONENT_REQUIREMENT_FIELD,
field.toBuilder().initializer(XCodeBlock.of("this")).build());
} else {
addField(FieldSpecKind.COMPONENT_REQUIREMENT_FIELD, field);
constructor.addStatement("this.%1N = %1N", field);
constructor.addParameter(
field.getName(), field.getType()); // SUPPRESS_GET_NAME_CHECK
}
});
if (isComponentShard()) {
constructor.addCode(concat(componentRequirementInitializations));
}
constructor.addParameters(parameters);
// TODO(cgdecker): It's not the case that each initialize() method has need for all of the
// given parameters. In some cases, those parameters may have already been assigned to fields
// which could be referenced instead. In other cases, an initialize method may just not need
// some of the parameters because the set of initializations in that partition does not
// include any reference to them. Right now, the Dagger code has no way of getting that
// information because, among other things, componentImplementation.getImplementations() just
// returns a bunch of CodeBlocks with no semantic information. Additionally, we may not know
// yet whether a field will end up needing to be created for a specific requirement, and we
// don't want to create a field that ends up only being used during initialization.
XCodeBlock args = parameterNames(parameters);
ImmutableList<XFunSpec> initializationMethods =
createPartitionedMethods(
"initialize",
// TODO(bcorso): Rather than passing in all of the constructor parameters, keep track
// of which parameters are used during initialization and only pass those. This could
// be useful for FastInit, where most of the initializations are just calling
// SwitchingProvider with no parameters.
makeFinal(parameters),
initializations,
methodName ->
methodBuilder(methodName)
/* TODO(gak): Strictly speaking, we only need the suppression here if we are
* also initializing a raw field in this method, but the structure of this
* code makes it awkward to pass that bit through. This will be cleaned up
* when we no longer separate fields and initialization as we do now. */
.addAnnotation(suppressWarnings(UNCHECKED)));
for (XFunSpec initializationMethod : initializationMethods) {
constructor.addStatement("%N(%L)", initializationMethod, args);
addMethod(MethodSpecKind.INITIALIZE_METHOD, initializationMethod);
}
if (isComponentShard()) {
constructor.addCode(concat(shardInitializations));
} else {
// This initialization is called from the componentShard, so we need to use those args.
XCodeBlock componentArgs =
parameterNames(componentShard.constructorParameters.values().asList());
XCodeBlock componentFields =
componentFieldsByImplementation().values().stream()
.map(field -> XCodeBlock.of("%N", field))
.collect(toParametersCodeBlock());
shardInitializations.add(
XCodeBlock.of(
"%N = %L;",
shardFieldsByImplementation.get(this),
XCodeBlock.ofNewInstance(
name,
"%L",
isEmpty(componentArgs)
? componentFields
: makeParametersCodeBlock(
ImmutableList.of(componentFields, componentArgs)))));
}
addMethod(MethodSpecKind.CONSTRUCTOR, constructor.build());
}
private void addCancellationListenerImplementation() {
XFunSpecs.Builder methodBuilder =
methodBuilder(CANCELLATION_LISTENER_METHOD_NAME)
.isOverride(true)
.addModifiers(PUBLIC)
.addParameter(MAY_INTERRUPT_IF_RUNNING_PARAM);
// Reversing should order cancellations starting from entry points and going down to leaves
// rather than the other way around. This shouldn't really matter but seems *slightly*
// preferable because:
// When a future that another future depends on is cancelled, that cancellation will propagate
// up the future graph toward the entry point. Cancelling in reverse order should ensure that
// everything that depends on a particular node has already been cancelled when that node is
// cancelled, so there's no need to propagate. Otherwise, when we cancel a leaf node, it might
// propagate through most of the graph, making most of the cancel calls that follow in the
// onProducerFutureCancelled method do nothing.
if (isComponentShard()) {
methodBuilder.addCode(concat(ImmutableList.copyOf(shardCancellations).reverse()));
} else if (!cancellations.isEmpty()) {
shardCancellations.add(
XCodeBlock.of(
"%N.%N(%N);",
shardFieldsByImplementation.get(this),
CANCELLATION_LISTENER_METHOD_NAME,
MAY_INTERRUPT_IF_RUNNING_PARAM.getName())); // SUPPRESS_GET_NAME_CHECK
}
ImmutableList<XCodeBlock> cancellationStatements =
ImmutableList.copyOf(cancellations.values()).reverse();
if (cancellationStatements.size() < STATEMENTS_PER_METHOD) {
methodBuilder.addCode(concat(cancellationStatements));
} else {
ImmutableList<XFunSpec> cancelProducersMethods =
createPartitionedMethods(
"cancelProducers",
ImmutableList.of(MAY_INTERRUPT_IF_RUNNING_PARAM),
cancellationStatements,
methodName -> methodBuilder(methodName).addModifiers(PRIVATE));
for (XFunSpec cancelProducersMethod : cancelProducersMethods) {
methodBuilder.addStatement(
"%N(%N)",
cancelProducersMethod,
MAY_INTERRUPT_IF_RUNNING_PARAM.getName()); // SUPPRESS_GET_NAME_CHECK
addMethod(MethodSpecKind.CANCELLATION_LISTENER_METHOD, cancelProducersMethod);
}
}
if (isComponentShard()) {
cancelParentStatement().ifPresent(methodBuilder::addCode);
}
addMethod(MethodSpecKind.CANCELLATION_LISTENER_METHOD, methodBuilder.build());
}
private Optional<XCodeBlock> cancelParentStatement() {
if (!shouldPropagateCancellationToParent()) {
return Optional.empty();
}
return Optional.of(
XCodeBlock.builder()
.addStatement(
"%L.%N(%N)",
parent.get().componentFieldReference(),
CANCELLATION_LISTENER_METHOD_NAME,
MAY_INTERRUPT_IF_RUNNING_PARAM.getName()) // SUPPRESS_GET_NAME_CHECK
.build());
}
private boolean shouldPropagateCancellationToParent() {
return parent.isPresent()
&& parent
.get()
.componentDescriptor()
.cancellationPolicy()
.map(policy -> policy.equals(CancellationPolicy.PROPAGATE))
.orElse(false);
}
/**
* Creates one or more methods, all taking the given {@code parameters}, which partition the
* given list of {@code statements} among themselves such that no method has more than {@code
* STATEMENTS_PER_METHOD} statements in it and such that the returned methods, if called in
* order, will execute the {@code statements} in the given order.
*/
private ImmutableList<XFunSpec> createPartitionedMethods(
String methodName,
Collection<XParameterSpec> parameters,
List<XCodeBlock> statements,
Function<String, XFunSpecs.Builder> methodBuilderCreator) {
return Lists.partition(statements, STATEMENTS_PER_METHOD).stream()
.map(
partition ->
methodBuilderCreator
.apply(getUniqueMethodName(methodName))
.addModifiers(PRIVATE)
.addParameters(parameters)
.addCode(concat(partition))
.build())
.collect(toImmutableList());
}
}
private static ImmutableList<ComponentRequirement> constructorRequirements(BindingGraph graph) {
if (graph.componentDescriptor().hasCreator()) {
return graph.componentRequirements().asList();
} else if (graph.factoryMethod().isPresent()) {
return graph.factoryMethodParameters().keySet().asList();
} else {
throw new AssertionError(
"Expected either a component creator or factory method but found neither.");
}
}
private static ImmutableList<XParameterSpec> makeFinal(List<XParameterSpec> parameters) {
return parameters.stream()
.map(
param ->
XParameterSpecs.builder(param.getName(), param.getType()) // SUPPRESS_GET_NAME_CHECK
.addModifiers(FINAL)
.build())
.collect(toImmutableList());
}
}
|
dependency
|
java
|
elastic__elasticsearch
|
x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineMethods.java
|
{
"start": 3006,
"end": 4005
}
|
class ____ implements DoubleUnaryOperator {
protected final double max;
protected final double min;
protected final double sum;
protected final double mean;
protected final int count;
@SuppressWarnings("HiddenField")
SinglePassSimpleStatisticsMethod(double[] values) {
int count = 0;
double sum = 0.0;
double min = Double.MAX_VALUE;
double max = Double.MIN_VALUE;
for (double value : values) {
if (Double.isNaN(value) == false) {
count += 1;
min = Math.min(value, min);
max = Math.max(value, max);
sum += value;
}
}
this.count = count;
this.min = min;
this.max = max;
this.sum = sum;
this.mean = this.count == 0 ? Double.NaN : this.sum / this.count;
}
}
}
|
SinglePassSimpleStatisticsMethod
|
java
|
spring-projects__spring-framework
|
spring-messaging/src/jmh/java/org/springframework/messaging/simp/broker/DefaultSubscriptionRegistryBenchmark.java
|
{
"start": 3265,
"end": 4477
}
|
class ____ {
@Param({"none", "sameDestination", "sameSession"})
String contention;
public String session;
public Message<?> subscribe;
public String findDestination;
public Message<?> unsubscribe;
@Setup(Level.Trial)
public void doSetup(ServerState serverState) {
int uniqueNumber = serverState.uniqueIdGenerator.incrementAndGet();
if ("sameDestination".equals(this.contention)) {
this.findDestination = serverState.destinationIds[0];
}
else {
this.findDestination = serverState.destinationIds[uniqueNumber % serverState.destinationIds.length];
}
if ("sameSession".equals(this.contention)) {
this.session = serverState.sessionIds[0];
}
else {
this.session = serverState.sessionIds[uniqueNumber % serverState.sessionIds.length];
}
String subscription = String.valueOf(uniqueNumber);
String subscribeDestination = "patternSubscriptions".equals(serverState.specialization) ?
"/**/" + this.findDestination : this.findDestination;
this.subscribe = subscribeMessage(this.session, subscription, subscribeDestination);
this.unsubscribe = unsubscribeMessage(this.session, subscription);
}
}
@State(Scope.Thread)
public static
|
Requests
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/DelegateRequestRepresentationTest.java
|
{
"start": 2140,
"end": 2432
}
|
class ____ {",
" @Inject ReusableScoped() {}",
"}");
private static final Source UNSCOPED =
CompilerTests.javaSource(
"test.Unscoped",
"package test;",
"",
"import javax.inject.Inject;",
"",
"
|
ReusableScoped
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/Assertions_avoid_ambiguous_reference_compilation_error_Test.java
|
{
"start": 766,
"end": 1373
}
|
class ____ {
@Test
void should_not_report_ambiguous_reference_compilation_error() {
// does not compile, explanation: https://stackoverflow.com/questions/29499847/ambiguous-method-in-java-8-why
// Assertions.assertThat(getDate()).isEqualTo(getDate());
// compiles since AssertionsForClassTypes does not provide assertThat for interfaces
AssertionsForClassTypes.assertThat(getDate()).isEqualTo(getDate());
}
@SuppressWarnings("unchecked")
protected static <T extends Date> T getDate() {
return (T) new Date(123);
}
}
|
Assertions_avoid_ambiguous_reference_compilation_error_Test
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/proxy/LazyCollectionDeletedAllowProxyTest.java
|
{
"start": 2286,
"end": 5210
}
|
class ____ {
private Long postId;
@Test
public void updatingAnAttributeDoesNotDeleteLazyCollectionsTest(SessionFactoryScope scope) {
scope.inTransaction( s -> {
Query query = s.createQuery( "from AdditionalDetails where id = :id" );
query.setParameter( "id", postId );
AdditionalDetails additionalDetails = (AdditionalDetails) query.getSingleResult();
additionalDetails.setDetails( "New data" );
s.persist( additionalDetails );
} );
scope.inTransaction( s -> {
Query query = s.createQuery( "from Post where id = :id" );
query.setParameter( "id", postId );
Post retrievedPost = (Post) query.getSingleResult();
assertFalse( retrievedPost.getTags().isEmpty(), "No tags found" );
retrievedPost.getTags().forEach( tag -> assertNotNull( tag ) );
} );
scope.inTransaction( s -> {
Query query = s.createQuery( "from AdditionalDetails where id = :id" );
query.setParameter( "id", postId );
AdditionalDetails additionalDetails = (AdditionalDetails) query.getSingleResult();
Post post = additionalDetails.getPost();
assertIsEnhancedProxy( post );
post.setMessage( "new message" );
} );
scope.inTransaction( s -> {
Query query = s.createQuery( "from Post where id = :id" );
query.setParameter( "id", postId );
Post retrievedPost = (Post) query.getSingleResult();
assertEquals( "new message", retrievedPost.getMessage() );
assertFalse( retrievedPost.getTags().isEmpty(), "No tags found" );
retrievedPost.getTags().forEach( tag -> {
assertNotNull( tag );
assertFalse( tag.getLabels().isEmpty(), "No Labels found" );
} );
} );
}
@BeforeEach
public void prepare(SessionFactoryScope scope) {
scope.inTransaction( s -> {
Post post = new Post();
Tag tag1 = new Tag( "tag1" );
Tag tag2 = new Tag( "tag2" );
Label label1 = new Label( "label1" );
Label label2 = new Label( "label2" );
tag1.addLabel( label1 );
tag2.addLabel( label2 );
Set<Tag> tagSet = new HashSet<>();
tagSet.add( tag1 );
tagSet.add( tag2 );
post.setTags( tagSet );
AdditionalDetails details = new AdditionalDetails();
details.setPost( post );
post.setAdditionalDetails( details );
details.setDetails( "Some data" );
s.persist( post );
postId = post.id;
} );
}
@AfterEach
public void cleanData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
private void assertIsEnhancedProxy(Object entity) {
assertThat( entity, instanceOf( PersistentAttributeInterceptable.class ) );
final PersistentAttributeInterceptable interceptable = (PersistentAttributeInterceptable) entity;
final PersistentAttributeInterceptor interceptor = interceptable.$$_hibernate_getInterceptor();
assertThat( interceptor, instanceOf( EnhancementAsProxyLazinessInterceptor.class ) );
}
// --- //
@Entity(name = "Tag")
@Table(name = "TAG")
static
|
LazyCollectionDeletedAllowProxyTest
|
java
|
redisson__redisson
|
redisson/src/test/java/org/redisson/rx/RedissonDequeReactiveTest.java
|
{
"start": 229,
"end": 3883
}
|
class ____ extends BaseRxTest {
@Test
public void testRemoveLastOccurrence() {
RDequeRx<Integer> queue1 = redisson.getDeque("deque1");
sync(queue1.addFirst(3));
sync(queue1.addFirst(1));
sync(queue1.addFirst(2));
sync(queue1.addFirst(3));
sync(queue1.removeLastOccurrence(3));
assertThat(sync(queue1)).containsExactly(3, 2, 1);
}
@Test
public void testRemoveFirstOccurrence() {
RDequeRx<Integer> queue1 = redisson.getDeque("deque1");
sync(queue1.addFirst(3));
sync(queue1.addFirst(1));
sync(queue1.addFirst(2));
sync(queue1.addFirst(3));
sync(queue1.removeFirstOccurrence(3));
assertThat(sync(queue1)).containsExactly(2, 1, 3);
}
@Test
public void testRemoveLast() {
RDequeRx<Integer> queue1 = redisson.getDeque("deque1");
sync(queue1.addFirst(1));
sync(queue1.addFirst(2));
sync(queue1.addFirst(3));
Assertions.assertEquals(1, (int)sync(queue1.removeLast()));
Assertions.assertEquals(2, (int)sync(queue1.removeLast()));
Assertions.assertEquals(3, (int)sync(queue1.removeLast()));
}
@Test
public void testRemoveFirst() {
RDequeRx<Integer> queue1 = redisson.getDeque("deque1");
sync(queue1.addFirst(1));
sync(queue1.addFirst(2));
sync(queue1.addFirst(3));
Assertions.assertEquals(3, (int)sync(queue1.removeFirst()));
Assertions.assertEquals(2, (int)sync(queue1.removeFirst()));
Assertions.assertEquals(1, (int)sync(queue1.removeFirst()));
}
@Test
public void testPeek() {
RDequeRx<Integer> queue1 = redisson.getDeque("deque1");
Assertions.assertNull(sync(queue1.peekFirst()));
Assertions.assertNull(sync(queue1.peekLast()));
sync(queue1.addFirst(2));
Assertions.assertEquals(2, (int)sync(queue1.peekFirst()));
Assertions.assertEquals(2, (int)sync(queue1.peekLast()));
}
@Test
public void testPollLastAndOfferFirstTo() {
RDequeRx<Integer> queue1 = redisson.getDeque("deque1");
sync(queue1.addFirst(3));
sync(queue1.addFirst(2));
sync(queue1.addFirst(1));
RDequeRx<Integer> queue2 = redisson.getDeque("deque2");
sync(queue2.addFirst(6));
sync(queue2.addFirst(5));
sync(queue2.addFirst(4));
sync(queue1.pollLastAndOfferFirstTo(queue2.getName()));
assertThat(sync(queue2)).containsExactly(3, 4, 5, 6);
}
@Test
public void testAddFirst() {
RDequeRx<Integer> queue = redisson.getDeque("deque");
sync(queue.addFirst(1));
sync(queue.addFirst(2));
sync(queue.addFirst(3));
assertThat(sync(queue)).containsExactly(3, 2, 1);
}
@Test
public void testAddLast() {
RDequeRx<Integer> queue = redisson.getDeque("deque");
sync(queue.addLast(1));
sync(queue.addLast(2));
sync(queue.addLast(3));
assertThat(sync(queue)).containsExactly(1, 2, 3);
}
@Test
public void testOfferFirst() {
RDequeRx<Integer> queue = redisson.getDeque("deque");
sync(queue.offerFirst(1));
sync(queue.offerFirst(2));
sync(queue.offerFirst(3));
assertThat(sync(queue)).containsExactly(3, 2, 1);
}
@Test
public void testDescendingIterator() {
final RDequeRx<Integer> queue = redisson.getDeque("deque");
sync(queue.addAll(Arrays.asList(1, 2, 3)));
assertThat(toIterator(queue.descendingIterator())).toIterable().containsExactly(3, 2, 1);
}
}
|
RedissonDequeReactiveTest
|
java
|
apache__dubbo
|
dubbo-common/src/test/java/org/apache/dubbo/common/threadpool/support/eager/TaskQueueTest.java
|
{
"start": 1236,
"end": 3178
}
|
class ____ {
private TaskQueue<Runnable> queue;
private EagerThreadPoolExecutor executor;
@BeforeEach
void setup() {
queue = new TaskQueue<Runnable>(1);
executor = mock(EagerThreadPoolExecutor.class);
queue.setExecutor(executor);
}
@Test
void testOffer1() throws Exception {
Assertions.assertThrows(RejectedExecutionException.class, () -> {
TaskQueue<Runnable> queue = new TaskQueue<Runnable>(1);
queue.offer(mock(Runnable.class));
});
}
@Test
void testOffer2() throws Exception {
Mockito.when(executor.getPoolSize()).thenReturn(2);
Mockito.when(executor.getActiveCount()).thenReturn(1);
assertThat(queue.offer(mock(Runnable.class)), is(true));
}
@Test
void testOffer3() throws Exception {
Mockito.when(executor.getPoolSize()).thenReturn(2);
Mockito.when(executor.getActiveCount()).thenReturn(2);
Mockito.when(executor.getMaximumPoolSize()).thenReturn(4);
assertThat(queue.offer(mock(Runnable.class)), is(false));
}
@Test
void testOffer4() throws Exception {
Mockito.when(executor.getPoolSize()).thenReturn(4);
Mockito.when(executor.getActiveCount()).thenReturn(4);
Mockito.when(executor.getMaximumPoolSize()).thenReturn(4);
assertThat(queue.offer(mock(Runnable.class)), is(true));
}
@Test
void testRetryOffer1() throws Exception {
Assertions.assertThrows(RejectedExecutionException.class, () -> {
Mockito.when(executor.isShutdown()).thenReturn(true);
queue.retryOffer(mock(Runnable.class), 1000, TimeUnit.MILLISECONDS);
});
}
@Test
void testRetryOffer2() throws Exception {
Mockito.when(executor.isShutdown()).thenReturn(false);
assertThat(queue.retryOffer(mock(Runnable.class), 1000, TimeUnit.MILLISECONDS), is(true));
}
}
|
TaskQueueTest
|
java
|
spring-projects__spring-boot
|
module/spring-boot-micrometer-metrics/src/test/java/org/springframework/boot/micrometer/metrics/autoconfigure/system/SystemMetricsAutoConfigurationTests.java
|
{
"start": 4855,
"end": 5047
}
|
class ____ {
@Bean
ProcessorMetrics customProcessorMetrics() {
return new ProcessorMetrics();
}
}
@Configuration(proxyBeanMethods = false)
static
|
CustomProcessorMetricsConfiguration
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/extractor/ResultOfExtractor.java
|
{
"start": 877,
"end": 1270
}
|
class ____ implements Function<Object, Object> {
private final String methodName;
ResultOfExtractor(String methodName) {
this.methodName = methodName;
}
/**
* Behavior is described in {@link MethodSupport#methodResultFor(Object, String)}
*/
@Override
public Object apply(Object input) {
return MethodSupport.methodResultFor(input, methodName);
}
}
|
ResultOfExtractor
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/UnnecessarySetDefaultTest.java
|
{
"start": 1589,
"end": 2321
}
|
class ____ {
{
NullPointerTester tester = new NullPointerTester();
tester.setDefault(String.class, "");
tester
.setDefault(ImmutableList.class, ImmutableList.of(42))
.setDefault(ImmutableList.class, ImmutableList.of())
.setDefault(ImmutableList.class, ImmutableList.<String>of())
.setDefault(ImmutableList.class, ImmutableList.of(42));
}
}
""")
.addOutputLines(
"out/Test.java",
"""
import com.google.common.collect.ImmutableList;
import com.google.common.testing.NullPointerTester;
|
Test
|
java
|
spring-projects__spring-boot
|
module/spring-boot-webmvc-test/src/test/java/org/springframework/boot/webmvc/test/autoconfigure/mockmvc/ExampleController1.java
|
{
"start": 1240,
"end": 1888
}
|
class ____ {
@GetMapping("/one")
public String one() {
return "one";
}
@GetMapping("/error")
public String error() {
throw new ExampleException();
}
@GetMapping(path = "/html", produces = "text/html")
public String html() {
return "<html><body>Hello</body></html>";
}
@GetMapping("/formatting")
public String formatting(WebRequest request) {
Object formattingFails = new Object() {
@Override
public String toString() {
throw new IllegalStateException("Formatting failed");
}
};
request.setAttribute("attribute-1", formattingFails, RequestAttributes.SCOPE_SESSION);
return "formatting";
}
}
|
ExampleController1
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableCreate.java
|
{
"start": 1739,
"end": 4187
}
|
class ____<T>
extends AtomicReference<Disposable>
implements ObservableEmitter<T>, Disposable {
private static final long serialVersionUID = -3434801548987643227L;
final Observer<? super T> observer;
CreateEmitter(Observer<? super T> observer) {
this.observer = observer;
}
@Override
public void onNext(T t) {
if (t == null) {
onError(ExceptionHelper.createNullPointerException("onNext called with a null value."));
return;
}
if (!isDisposed()) {
observer.onNext(t);
}
}
@Override
public void onError(Throwable t) {
if (!tryOnError(t)) {
RxJavaPlugins.onError(t);
}
}
@Override
public boolean tryOnError(Throwable t) {
if (t == null) {
t = ExceptionHelper.createNullPointerException("onError called with a null Throwable.");
}
if (!isDisposed()) {
try {
observer.onError(t);
} finally {
dispose();
}
return true;
}
return false;
}
@Override
public void onComplete() {
if (!isDisposed()) {
try {
observer.onComplete();
} finally {
dispose();
}
}
}
@Override
public void setDisposable(Disposable d) {
DisposableHelper.set(this, d);
}
@Override
public void setCancellable(Cancellable c) {
setDisposable(new CancellableDisposable(c));
}
@Override
public ObservableEmitter<T> serialize() {
return new SerializedEmitter<>(this);
}
@Override
public void dispose() {
DisposableHelper.dispose(this);
}
@Override
public boolean isDisposed() {
return DisposableHelper.isDisposed(get());
}
@Override
public String toString() {
return String.format("%s{%s}", getClass().getSimpleName(), super.toString());
}
}
/**
* Serializes calls to onNext, onError and onComplete.
*
* @param <T> the value type
*/
static final
|
CreateEmitter
|
java
|
spring-projects__spring-boot
|
module/spring-boot-hazelcast/src/main/java/org/springframework/boot/hazelcast/autoconfigure/health/HazelcastHealthContributorAutoConfiguration.java
|
{
"start": 2084,
"end": 2593
}
|
class ____
extends CompositeHealthContributorConfiguration<HazelcastHealthIndicator, HazelcastInstance> {
HazelcastHealthContributorAutoConfiguration() {
super(HazelcastHealthIndicator::new);
}
@Bean
@ConditionalOnMissingBean(name = { "hazelcastHealthIndicator", "hazelcastHealthContributor" })
HealthContributor hazelcastHealthContributor(ConfigurableListableBeanFactory beanFactory) {
return createContributor(beanFactory, HazelcastInstance.class);
}
}
|
HazelcastHealthContributorAutoConfiguration
|
java
|
micronaut-projects__micronaut-core
|
test-suite/src/test/java/io/micronaut/docs/config/properties/Engine.java
|
{
"start": 655,
"end": 721
}
|
interface ____ {
int getCylinders();
String start();
}
|
Engine
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jdbc/src/test/java/org/springframework/boot/jdbc/DataSourceBuilderTests.java
|
{
"start": 24712,
"end": 25344
}
|
class ____ extends org.apache.tomcat.jdbc.pool.DataSource {
private @Nullable String jdbcUrl;
private @Nullable String user;
private @Nullable String driverClass;
@Nullable String getJdbcUrl() {
return this.jdbcUrl;
}
void setJdbcUrl(@Nullable String jdbcUrl) {
this.jdbcUrl = jdbcUrl;
}
@Nullable String getUser() {
return this.user;
}
void setUser(@Nullable String user) {
this.user = user;
}
@Nullable String getDriverClass() {
return this.driverClass;
}
void setDriverClass(@Nullable String driverClass) {
this.driverClass = driverClass;
}
}
static
|
CustomTomcatDataSource
|
java
|
apache__camel
|
components/camel-snakeyaml/src/test/java/org/apache/camel/component/snakeyaml/model/TestPojo.java
|
{
"start": 863,
"end": 1707
}
|
class ____ {
private String name;
public TestPojo() {
}
public TestPojo(String name) {
this.name = name;
}
public String getName() {
return this.name;
}
public void setName(String name) {
this.name = name;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TestPojo pojo = (TestPojo) o;
return name != null ? name.equals(pojo.name) : pojo.name == null;
}
@Override
public int hashCode() {
return name != null ? name.hashCode() : 0;
}
@Override
public String toString() {
return "TestPojo {"
+ "name='" + name + '\''
+ '}';
}
}
|
TestPojo
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/java8stream/base/Target.java
|
{
"start": 404,
"end": 3101
}
|
class ____ {
private Stream<Integer> ints;
private Stream<Integer> targetStream;
private List<String> stringStream;
private Collection<String> stringCollection;
private Set<Integer> integerSet;
private Iterable<Integer> integerIterable;
private SortedSet<Integer> sortedSet;
private NavigableSet<Integer> navigableSet;
private List<String> intToStringStream;
private Integer[] stringArrayStream;
private List<TargetElement> targetElements;
public Stream<Integer> getInts() {
return ints;
}
public void setInts(Stream<Integer> ints) {
this.ints = ints;
}
public Stream<Integer> getTargetStream() {
return targetStream;
}
public void setTargetStream(Stream<Integer> targetStream) {
this.targetStream = targetStream;
}
public List<String> getStringStream() {
return stringStream;
}
public void setStringStream(List<String> stringStream) {
this.stringStream = stringStream;
}
public Collection<String> getStringCollection() {
return stringCollection;
}
public void setStringCollection(Collection<String> stringCollection) {
this.stringCollection = stringCollection;
}
public Set<Integer> getIntegerSet() {
return integerSet;
}
public void setIntegerSet(Set<Integer> integerSet) {
this.integerSet = integerSet;
}
public Iterable<Integer> getIntegerIterable() {
return integerIterable;
}
public void setIntegerIterable(Iterable<Integer> integerIterable) {
this.integerIterable = integerIterable;
}
public SortedSet<Integer> getSortedSet() {
return sortedSet;
}
public void setSortedSet(SortedSet<Integer> sortedSet) {
this.sortedSet = sortedSet;
}
public NavigableSet<Integer> getNavigableSet() {
return navigableSet;
}
public void setNavigableSet(NavigableSet<Integer> navigableSet) {
this.navigableSet = navigableSet;
}
public List<String> getIntToStringStream() {
return intToStringStream;
}
public void setIntToStringStream(List<String> intToStringStream) {
this.intToStringStream = intToStringStream;
}
public Integer[] getStringArrayStream() {
return stringArrayStream;
}
public void setStringArrayStream(Integer[] stringArrayStream) {
this.stringArrayStream = stringArrayStream;
}
public List<TargetElement> getTargetElements() {
return targetElements;
}
public void setTargetElements(List<TargetElement> targetElements) {
this.targetElements = targetElements;
}
}
|
Target
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/abilities/SupportsProjectionPushDown.java
|
{
"start": 1706,
"end": 2307
}
|
interface ____ not implemented, a projection is applied in a subsequent
* operation after the source.
*
* <p>For efficiency, a source can push a projection further down in order to be close to the actual
* data generation. A projection is only selecting fields that are used by a query (possibly in a
* different field order). It does not contain any computation. A projection can either be performed
* on the fields of the top-level row only or consider nested fields as well (see {@link
* #supportsNestedProjection()}).
*
* @see Projection
* @see ProjectedRowData
*/
@PublicEvolving
public
|
is
|
java
|
apache__camel
|
components/camel-debezium/camel-debezium-postgres/src/generated/java/org/apache/camel/component/debezium/postgres/DebeziumPostgresEndpointConfigurer.java
|
{
"start": 744,
"end": 57690
}
|
class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
/**
 * Applies a single endpoint option to the given {@code DebeziumPostgresEndpoint}
 * (generated code — do not edit by hand).
 *
 * <p>The option name is matched against both its all-lowercase and camelCase
 * forms; the raw {@code value} is converted to the setter's parameter type via
 * {@code property(camelContext, type, value)}. Options whose setters take a
 * millisecond count are converted from {@link java.time.Duration} via
 * {@code toMillis()}.
 *
 * @param camelContext the Camel context used for type conversion of {@code value}
 * @param obj          the endpoint to configure; cast to {@code DebeziumPostgresEndpoint}
 * @param name         the option name (lowercase or camelCase form)
 * @param value        the raw option value to convert and apply
 * @param ignoreCase   whether to match {@code name} case-insensitively
 * @return {@code true} if the option was recognized and applied, {@code false} otherwise
 */
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
    DebeziumPostgresEndpoint target = (DebeziumPostgresEndpoint) obj;
    switch (ignoreCase ? name.toLowerCase() : name) {
    case "additionalproperties":
    case "additionalProperties": target.getConfiguration().setAdditionalProperties(property(camelContext, java.util.Map.class, value)); return true;
    case "binaryhandlingmode":
    case "binaryHandlingMode": target.getConfiguration().setBinaryHandlingMode(property(camelContext, java.lang.String.class, value)); return true;
    case "bridgeerrorhandler":
    case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
    case "columnexcludelist":
    case "columnExcludeList": target.getConfiguration().setColumnExcludeList(property(camelContext, java.lang.String.class, value)); return true;
    case "columnincludelist":
    case "columnIncludeList": target.getConfiguration().setColumnIncludeList(property(camelContext, java.lang.String.class, value)); return true;
    case "columnpropagatesourcetype":
    case "columnPropagateSourceType": target.getConfiguration().setColumnPropagateSourceType(property(camelContext, java.lang.String.class, value)); return true;
    case "connectionvalidationtimeoutms":
    case "connectionValidationTimeoutMs": target.getConfiguration().setConnectionValidationTimeoutMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    case "converters": target.getConfiguration().setConverters(property(camelContext, java.lang.String.class, value)); return true;
    case "custommetrictags":
    case "customMetricTags": target.getConfiguration().setCustomMetricTags(property(camelContext, java.lang.String.class, value)); return true;
    case "databasedbname":
    case "databaseDbname": target.getConfiguration().setDatabaseDbname(property(camelContext, java.lang.String.class, value)); return true;
    case "databasehostname":
    case "databaseHostname": target.getConfiguration().setDatabaseHostname(property(camelContext, java.lang.String.class, value)); return true;
    case "databaseinitialstatements":
    case "databaseInitialStatements": target.getConfiguration().setDatabaseInitialStatements(property(camelContext, java.lang.String.class, value)); return true;
    case "databasepassword":
    case "databasePassword": target.getConfiguration().setDatabasePassword(property(camelContext, java.lang.String.class, value)); return true;
    case "databaseport":
    case "databasePort": target.getConfiguration().setDatabasePort(property(camelContext, int.class, value)); return true;
    case "databasequerytimeoutms":
    case "databaseQueryTimeoutMs": target.getConfiguration().setDatabaseQueryTimeoutMs(property(camelContext, int.class, value)); return true;
    case "databasesslcert":
    case "databaseSslcert": target.getConfiguration().setDatabaseSslcert(property(camelContext, java.lang.String.class, value)); return true;
    case "databasesslfactory":
    case "databaseSslfactory": target.getConfiguration().setDatabaseSslfactory(property(camelContext, java.lang.String.class, value)); return true;
    case "databasesslkey":
    case "databaseSslkey": target.getConfiguration().setDatabaseSslkey(property(camelContext, java.lang.String.class, value)); return true;
    case "databasesslmode":
    case "databaseSslmode": target.getConfiguration().setDatabaseSslmode(property(camelContext, java.lang.String.class, value)); return true;
    case "databasesslpassword":
    case "databaseSslpassword": target.getConfiguration().setDatabaseSslpassword(property(camelContext, java.lang.String.class, value)); return true;
    case "databasesslrootcert":
    case "databaseSslrootcert": target.getConfiguration().setDatabaseSslrootcert(property(camelContext, java.lang.String.class, value)); return true;
    case "databasetcpkeepalive":
    case "databaseTcpkeepalive": target.getConfiguration().setDatabaseTcpkeepalive(property(camelContext, boolean.class, value)); return true;
    case "databaseuser":
    case "databaseUser": target.getConfiguration().setDatabaseUser(property(camelContext, java.lang.String.class, value)); return true;
    case "datatypepropagatesourcetype":
    case "datatypePropagateSourceType": target.getConfiguration().setDatatypePropagateSourceType(property(camelContext, java.lang.String.class, value)); return true;
    case "decimalhandlingmode":
    case "decimalHandlingMode": target.getConfiguration().setDecimalHandlingMode(property(camelContext, java.lang.String.class, value)); return true;
    case "errorsmaxretries":
    case "errorsMaxRetries": target.getConfiguration().setErrorsMaxRetries(property(camelContext, int.class, value)); return true;
    case "eventprocessingfailurehandlingmode":
    case "eventProcessingFailureHandlingMode": target.getConfiguration().setEventProcessingFailureHandlingMode(property(camelContext, java.lang.String.class, value)); return true;
    case "exceptionhandler":
    case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
    case "exchangepattern":
    case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
    case "executorshutdowntimeoutms":
    case "executorShutdownTimeoutMs": target.getConfiguration().setExecutorShutdownTimeoutMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    case "extendedheadersenabled":
    case "extendedHeadersEnabled": target.getConfiguration().setExtendedHeadersEnabled(property(camelContext, boolean.class, value)); return true;
    case "flushlsnsource":
    case "flushLsnSource": target.getConfiguration().setFlushLsnSource(property(camelContext, boolean.class, value)); return true;
    case "guardrailcollectionslimitaction":
    case "guardrailCollectionsLimitAction": target.getConfiguration().setGuardrailCollectionsLimitAction(property(camelContext, java.lang.String.class, value)); return true;
    case "guardrailcollectionsmax":
    case "guardrailCollectionsMax": target.getConfiguration().setGuardrailCollectionsMax(property(camelContext, int.class, value)); return true;
    case "heartbeatactionquery":
    case "heartbeatActionQuery": target.getConfiguration().setHeartbeatActionQuery(property(camelContext, java.lang.String.class, value)); return true;
    case "heartbeatintervalms":
    case "heartbeatIntervalMs": target.getConfiguration().setHeartbeatIntervalMs(property(camelContext, int.class, value)); return true;
    case "heartbeattopicsprefix":
    case "heartbeatTopicsPrefix": target.getConfiguration().setHeartbeatTopicsPrefix(property(camelContext, java.lang.String.class, value)); return true;
    case "hstorehandlingmode":
    case "hstoreHandlingMode": target.getConfiguration().setHstoreHandlingMode(property(camelContext, java.lang.String.class, value)); return true;
    case "includeschemacomments":
    case "includeSchemaComments": target.getConfiguration().setIncludeSchemaComments(property(camelContext, boolean.class, value)); return true;
    case "includeunknowndatatypes":
    case "includeUnknownDatatypes": target.getConfiguration().setIncludeUnknownDatatypes(property(camelContext, boolean.class, value)); return true;
    case "incrementalsnapshotchunksize":
    case "incrementalSnapshotChunkSize": target.getConfiguration().setIncrementalSnapshotChunkSize(property(camelContext, int.class, value)); return true;
    case "incrementalsnapshotwatermarkingstrategy":
    case "incrementalSnapshotWatermarkingStrategy": target.getConfiguration().setIncrementalSnapshotWatermarkingStrategy(property(camelContext, java.lang.String.class, value)); return true;
    case "internalkeyconverter":
    case "internalKeyConverter": target.getConfiguration().setInternalKeyConverter(property(camelContext, java.lang.String.class, value)); return true;
    case "internalvalueconverter":
    case "internalValueConverter": target.getConfiguration().setInternalValueConverter(property(camelContext, java.lang.String.class, value)); return true;
    case "intervalhandlingmode":
    case "intervalHandlingMode": target.getConfiguration().setIntervalHandlingMode(property(camelContext, java.lang.String.class, value)); return true;
    case "lsnflushtimeoutaction":
    case "lsnFlushTimeoutAction": target.getConfiguration().setLsnFlushTimeoutAction(property(camelContext, java.lang.String.class, value)); return true;
    case "lsnflushtimeoutms":
    case "lsnFlushTimeoutMs": target.getConfiguration().setLsnFlushTimeoutMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    case "maxbatchsize":
    case "maxBatchSize": target.getConfiguration().setMaxBatchSize(property(camelContext, int.class, value)); return true;
    case "maxqueuesize":
    case "maxQueueSize": target.getConfiguration().setMaxQueueSize(property(camelContext, int.class, value)); return true;
    case "maxqueuesizeinbytes":
    case "maxQueueSizeInBytes": target.getConfiguration().setMaxQueueSizeInBytes(property(camelContext, long.class, value)); return true;
    case "messagekeycolumns":
    case "messageKeyColumns": target.getConfiguration().setMessageKeyColumns(property(camelContext, java.lang.String.class, value)); return true;
    case "messageprefixexcludelist":
    case "messagePrefixExcludeList": target.getConfiguration().setMessagePrefixExcludeList(property(camelContext, java.lang.String.class, value)); return true;
    case "messageprefixincludelist":
    case "messagePrefixIncludeList": target.getConfiguration().setMessagePrefixIncludeList(property(camelContext, java.lang.String.class, value)); return true;
    case "notificationenabledchannels":
    case "notificationEnabledChannels": target.getConfiguration().setNotificationEnabledChannels(property(camelContext, java.lang.String.class, value)); return true;
    case "notificationsinktopicname":
    case "notificationSinkTopicName": target.getConfiguration().setNotificationSinkTopicName(property(camelContext, java.lang.String.class, value)); return true;
    case "offsetcommitpolicy":
    case "offsetCommitPolicy": target.getConfiguration().setOffsetCommitPolicy(property(camelContext, java.lang.String.class, value)); return true;
    case "offsetcommittimeoutms":
    case "offsetCommitTimeoutMs": target.getConfiguration().setOffsetCommitTimeoutMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    case "offsetflushintervalms":
    case "offsetFlushIntervalMs": target.getConfiguration().setOffsetFlushIntervalMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    case "offsetstorage":
    case "offsetStorage": target.getConfiguration().setOffsetStorage(property(camelContext, java.lang.String.class, value)); return true;
    case "offsetstoragefilename":
    case "offsetStorageFileName": target.getConfiguration().setOffsetStorageFileName(property(camelContext, java.lang.String.class, value)); return true;
    case "offsetstoragepartitions":
    case "offsetStoragePartitions": target.getConfiguration().setOffsetStoragePartitions(property(camelContext, int.class, value)); return true;
    case "offsetstoragereplicationfactor":
    case "offsetStorageReplicationFactor": target.getConfiguration().setOffsetStorageReplicationFactor(property(camelContext, int.class, value)); return true;
    case "offsetstoragetopic":
    case "offsetStorageTopic": target.getConfiguration().setOffsetStorageTopic(property(camelContext, java.lang.String.class, value)); return true;
    case "openlineageintegrationconfigfilepath":
    case "openlineageIntegrationConfigFilePath": target.getConfiguration().setOpenlineageIntegrationConfigFilePath(property(camelContext, java.lang.String.class, value)); return true;
    case "openlineageintegrationdatasetkafkabootstrapservers":
    case "openlineageIntegrationDatasetKafkaBootstrapServers": target.getConfiguration().setOpenlineageIntegrationDatasetKafkaBootstrapServers(property(camelContext, java.lang.String.class, value)); return true;
    case "openlineageintegrationenabled":
    case "openlineageIntegrationEnabled": target.getConfiguration().setOpenlineageIntegrationEnabled(property(camelContext, boolean.class, value)); return true;
    case "openlineageintegrationjobdescription":
    case "openlineageIntegrationJobDescription": target.getConfiguration().setOpenlineageIntegrationJobDescription(property(camelContext, java.lang.String.class, value)); return true;
    case "openlineageintegrationjobnamespace":
    case "openlineageIntegrationJobNamespace": target.getConfiguration().setOpenlineageIntegrationJobNamespace(property(camelContext, java.lang.String.class, value)); return true;
    case "openlineageintegrationjobowners":
    case "openlineageIntegrationJobOwners": target.getConfiguration().setOpenlineageIntegrationJobOwners(property(camelContext, java.lang.String.class, value)); return true;
    case "openlineageintegrationjobtags":
    case "openlineageIntegrationJobTags": target.getConfiguration().setOpenlineageIntegrationJobTags(property(camelContext, java.lang.String.class, value)); return true;
    case "pluginname":
    case "pluginName": target.getConfiguration().setPluginName(property(camelContext, java.lang.String.class, value)); return true;
    case "pollintervalms":
    case "pollIntervalMs": target.getConfiguration().setPollIntervalMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    case "postprocessors":
    case "postProcessors": target.getConfiguration().setPostProcessors(property(camelContext, java.lang.String.class, value)); return true;
    case "providetransactionmetadata":
    case "provideTransactionMetadata": target.getConfiguration().setProvideTransactionMetadata(property(camelContext, boolean.class, value)); return true;
    case "publicationautocreatemode":
    case "publicationAutocreateMode": target.getConfiguration().setPublicationAutocreateMode(property(camelContext, java.lang.String.class, value)); return true;
    case "publicationname":
    case "publicationName": target.getConfiguration().setPublicationName(property(camelContext, java.lang.String.class, value)); return true;
    case "publishviapartitionroot":
    case "publishViaPartitionRoot": target.getConfiguration().setPublishViaPartitionRoot(property(camelContext, boolean.class, value)); return true;
    case "queryfetchsize":
    case "queryFetchSize": target.getConfiguration().setQueryFetchSize(property(camelContext, int.class, value)); return true;
    case "replicaidentityautosetvalues":
    case "replicaIdentityAutosetValues": target.getConfiguration().setReplicaIdentityAutosetValues(property(camelContext, java.lang.String.class, value)); return true;
    case "retriablerestartconnectorwaitms":
    case "retriableRestartConnectorWaitMs": target.getConfiguration().setRetriableRestartConnectorWaitMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    case "schemaexcludelist":
    case "schemaExcludeList": target.getConfiguration().setSchemaExcludeList(property(camelContext, java.lang.String.class, value)); return true;
    case "schemahistoryinternalfilefilename":
    case "schemaHistoryInternalFileFilename": target.getConfiguration().setSchemaHistoryInternalFileFilename(property(camelContext, java.lang.String.class, value)); return true;
    case "schemaincludelist":
    case "schemaIncludeList": target.getConfiguration().setSchemaIncludeList(property(camelContext, java.lang.String.class, value)); return true;
    case "schemanameadjustmentmode":
    case "schemaNameAdjustmentMode": target.getConfiguration().setSchemaNameAdjustmentMode(property(camelContext, java.lang.String.class, value)); return true;
    case "schemarefreshmode":
    case "schemaRefreshMode": target.getConfiguration().setSchemaRefreshMode(property(camelContext, java.lang.String.class, value)); return true;
    case "signaldatacollection":
    case "signalDataCollection": target.getConfiguration().setSignalDataCollection(property(camelContext, java.lang.String.class, value)); return true;
    case "signalenabledchannels":
    case "signalEnabledChannels": target.getConfiguration().setSignalEnabledChannels(property(camelContext, java.lang.String.class, value)); return true;
    case "signalpollintervalms":
    case "signalPollIntervalMs": target.getConfiguration().setSignalPollIntervalMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    case "skippedoperations":
    case "skippedOperations": target.getConfiguration().setSkippedOperations(property(camelContext, java.lang.String.class, value)); return true;
    case "slotdroponstop":
    case "slotDropOnStop": target.getConfiguration().setSlotDropOnStop(property(camelContext, boolean.class, value)); return true;
    case "slotfailover":
    case "slotFailover": target.getConfiguration().setSlotFailover(property(camelContext, boolean.class, value)); return true;
    case "slotmaxretries":
    case "slotMaxRetries": target.getConfiguration().setSlotMaxRetries(property(camelContext, int.class, value)); return true;
    case "slotname":
    case "slotName": target.getConfiguration().setSlotName(property(camelContext, java.lang.String.class, value)); return true;
    case "slotretrydelayms":
    case "slotRetryDelayMs": target.getConfiguration().setSlotRetryDelayMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    case "slotstreamparams":
    case "slotStreamParams": target.getConfiguration().setSlotStreamParams(property(camelContext, java.lang.String.class, value)); return true;
    case "snapshotdelayms":
    case "snapshotDelayMs": target.getConfiguration().setSnapshotDelayMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    case "snapshotfetchsize":
    case "snapshotFetchSize": target.getConfiguration().setSnapshotFetchSize(property(camelContext, int.class, value)); return true;
    case "snapshotincludecollectionlist":
    case "snapshotIncludeCollectionList": target.getConfiguration().setSnapshotIncludeCollectionList(property(camelContext, java.lang.String.class, value)); return true;
    case "snapshotisolationmode":
    case "snapshotIsolationMode": target.getConfiguration().setSnapshotIsolationMode(property(camelContext, java.lang.String.class, value)); return true;
    case "snapshotlocktimeoutms":
    case "snapshotLockTimeoutMs": target.getConfiguration().setSnapshotLockTimeoutMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    case "snapshotlockingmode":
    case "snapshotLockingMode": target.getConfiguration().setSnapshotLockingMode(property(camelContext, java.lang.String.class, value)); return true;
    case "snapshotlockingmodecustomname":
    case "snapshotLockingModeCustomName": target.getConfiguration().setSnapshotLockingModeCustomName(property(camelContext, java.lang.String.class, value)); return true;
    case "snapshotmaxthreads":
    case "snapshotMaxThreads": target.getConfiguration().setSnapshotMaxThreads(property(camelContext, int.class, value)); return true;
    case "snapshotmode":
    case "snapshotMode": target.getConfiguration().setSnapshotMode(property(camelContext, java.lang.String.class, value)); return true;
    case "snapshotmodeconfigurationbasedsnapshotdata":
    case "snapshotModeConfigurationBasedSnapshotData": target.getConfiguration().setSnapshotModeConfigurationBasedSnapshotData(property(camelContext, boolean.class, value)); return true;
    case "snapshotmodeconfigurationbasedsnapshotondataerror":
    case "snapshotModeConfigurationBasedSnapshotOnDataError": target.getConfiguration().setSnapshotModeConfigurationBasedSnapshotOnDataError(property(camelContext, boolean.class, value)); return true;
    case "snapshotmodeconfigurationbasedsnapshotonschemaerror":
    case "snapshotModeConfigurationBasedSnapshotOnSchemaError": target.getConfiguration().setSnapshotModeConfigurationBasedSnapshotOnSchemaError(property(camelContext, boolean.class, value)); return true;
    case "snapshotmodeconfigurationbasedsnapshotschema":
    case "snapshotModeConfigurationBasedSnapshotSchema": target.getConfiguration().setSnapshotModeConfigurationBasedSnapshotSchema(property(camelContext, boolean.class, value)); return true;
    case "snapshotmodeconfigurationbasedstartstream":
    case "snapshotModeConfigurationBasedStartStream": target.getConfiguration().setSnapshotModeConfigurationBasedStartStream(property(camelContext, boolean.class, value)); return true;
    case "snapshotmodecustomname":
    case "snapshotModeCustomName": target.getConfiguration().setSnapshotModeCustomName(property(camelContext, java.lang.String.class, value)); return true;
    case "snapshotquerymode":
    case "snapshotQueryMode": target.getConfiguration().setSnapshotQueryMode(property(camelContext, java.lang.String.class, value)); return true;
    case "snapshotquerymodecustomname":
    case "snapshotQueryModeCustomName": target.getConfiguration().setSnapshotQueryModeCustomName(property(camelContext, java.lang.String.class, value)); return true;
    case "snapshotselectstatementoverrides":
    case "snapshotSelectStatementOverrides": target.getConfiguration().setSnapshotSelectStatementOverrides(property(camelContext, java.lang.String.class, value)); return true;
    case "snapshottablesorderbyrowcount":
    case "snapshotTablesOrderByRowCount": target.getConfiguration().setSnapshotTablesOrderByRowCount(property(camelContext, java.lang.String.class, value)); return true;
    case "sourceinfostructmaker":
    case "sourceinfoStructMaker": target.getConfiguration().setSourceinfoStructMaker(property(camelContext, java.lang.String.class, value)); return true;
    case "statusupdateintervalms":
    case "statusUpdateIntervalMs": target.getConfiguration().setStatusUpdateIntervalMs(property(camelContext, int.class, value)); return true;
    case "streamingdelayms":
    case "streamingDelayMs": target.getConfiguration().setStreamingDelayMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    case "tableexcludelist":
    case "tableExcludeList": target.getConfiguration().setTableExcludeList(property(camelContext, java.lang.String.class, value)); return true;
    case "tableignorebuiltin":
    case "tableIgnoreBuiltin": target.getConfiguration().setTableIgnoreBuiltin(property(camelContext, boolean.class, value)); return true;
    case "tableincludelist":
    case "tableIncludeList": target.getConfiguration().setTableIncludeList(property(camelContext, java.lang.String.class, value)); return true;
    case "timeprecisionmode":
    case "timePrecisionMode": target.getConfiguration().setTimePrecisionMode(property(camelContext, java.lang.String.class, value)); return true;
    case "tombstonesondelete":
    case "tombstonesOnDelete": target.getConfiguration().setTombstonesOnDelete(property(camelContext, boolean.class, value)); return true;
    case "topicnamingstrategy":
    case "topicNamingStrategy": target.getConfiguration().setTopicNamingStrategy(property(camelContext, java.lang.String.class, value)); return true;
    case "topicprefix":
    case "topicPrefix": target.getConfiguration().setTopicPrefix(property(camelContext, java.lang.String.class, value)); return true;
    case "transactionmetadatafactory":
    case "transactionMetadataFactory": target.getConfiguration().setTransactionMetadataFactory(property(camelContext, java.lang.String.class, value)); return true;
    case "unavailablevalueplaceholder":
    case "unavailableValuePlaceholder": target.getConfiguration().setUnavailableValuePlaceholder(property(camelContext, java.lang.String.class, value)); return true;
    case "xminfetchintervalms":
    case "xminFetchIntervalMs": target.getConfiguration().setXminFetchIntervalMs(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
    default: return false;
    }
}
/**
 * Returns the Java type a raw value must be converted to for the named option
 * (generated code — do not edit by hand).
 *
 * <p>Each option is listed under both its all-lowercase and camelCase names.
 * Millisecond-duration options report {@code long.class} even though
 * {@code configure(...)} accepts a {@link java.time.Duration} and converts it,
 * because the stored value is a millisecond count.
 *
 * @param name       the option name (lowercase or camelCase form)
 * @param ignoreCase whether to match {@code name} case-insensitively
 * @return the option's value type, or {@code null} if the option is unknown
 */
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
    switch (ignoreCase ? name.toLowerCase() : name) {
    case "additionalproperties":
    case "additionalProperties": return java.util.Map.class;
    case "binaryhandlingmode":
    case "binaryHandlingMode": return java.lang.String.class;
    case "bridgeerrorhandler":
    case "bridgeErrorHandler": return boolean.class;
    case "columnexcludelist":
    case "columnExcludeList": return java.lang.String.class;
    case "columnincludelist":
    case "columnIncludeList": return java.lang.String.class;
    case "columnpropagatesourcetype":
    case "columnPropagateSourceType": return java.lang.String.class;
    case "connectionvalidationtimeoutms":
    case "connectionValidationTimeoutMs": return long.class;
    case "converters": return java.lang.String.class;
    case "custommetrictags":
    case "customMetricTags": return java.lang.String.class;
    case "databasedbname":
    case "databaseDbname": return java.lang.String.class;
    case "databasehostname":
    case "databaseHostname": return java.lang.String.class;
    case "databaseinitialstatements":
    case "databaseInitialStatements": return java.lang.String.class;
    case "databasepassword":
    case "databasePassword": return java.lang.String.class;
    case "databaseport":
    case "databasePort": return int.class;
    case "databasequerytimeoutms":
    case "databaseQueryTimeoutMs": return int.class;
    case "databasesslcert":
    case "databaseSslcert": return java.lang.String.class;
    case "databasesslfactory":
    case "databaseSslfactory": return java.lang.String.class;
    case "databasesslkey":
    case "databaseSslkey": return java.lang.String.class;
    case "databasesslmode":
    case "databaseSslmode": return java.lang.String.class;
    case "databasesslpassword":
    case "databaseSslpassword": return java.lang.String.class;
    case "databasesslrootcert":
    case "databaseSslrootcert": return java.lang.String.class;
    case "databasetcpkeepalive":
    case "databaseTcpkeepalive": return boolean.class;
    case "databaseuser":
    case "databaseUser": return java.lang.String.class;
    case "datatypepropagatesourcetype":
    case "datatypePropagateSourceType": return java.lang.String.class;
    case "decimalhandlingmode":
    case "decimalHandlingMode": return java.lang.String.class;
    case "errorsmaxretries":
    case "errorsMaxRetries": return int.class;
    case "eventprocessingfailurehandlingmode":
    case "eventProcessingFailureHandlingMode": return java.lang.String.class;
    case "exceptionhandler":
    case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
    case "exchangepattern":
    case "exchangePattern": return org.apache.camel.ExchangePattern.class;
    case "executorshutdowntimeoutms":
    case "executorShutdownTimeoutMs": return long.class;
    case "extendedheadersenabled":
    case "extendedHeadersEnabled": return boolean.class;
    case "flushlsnsource":
    case "flushLsnSource": return boolean.class;
    case "guardrailcollectionslimitaction":
    case "guardrailCollectionsLimitAction": return java.lang.String.class;
    case "guardrailcollectionsmax":
    case "guardrailCollectionsMax": return int.class;
    case "heartbeatactionquery":
    case "heartbeatActionQuery": return java.lang.String.class;
    case "heartbeatintervalms":
    case "heartbeatIntervalMs": return int.class;
    case "heartbeattopicsprefix":
    case "heartbeatTopicsPrefix": return java.lang.String.class;
    case "hstorehandlingmode":
    case "hstoreHandlingMode": return java.lang.String.class;
    case "includeschemacomments":
    case "includeSchemaComments": return boolean.class;
    case "includeunknowndatatypes":
    case "includeUnknownDatatypes": return boolean.class;
    case "incrementalsnapshotchunksize":
    case "incrementalSnapshotChunkSize": return int.class;
    case "incrementalsnapshotwatermarkingstrategy":
    case "incrementalSnapshotWatermarkingStrategy": return java.lang.String.class;
    case "internalkeyconverter":
    case "internalKeyConverter": return java.lang.String.class;
    case "internalvalueconverter":
    case "internalValueConverter": return java.lang.String.class;
    case "intervalhandlingmode":
    case "intervalHandlingMode": return java.lang.String.class;
    case "lsnflushtimeoutaction":
    case "lsnFlushTimeoutAction": return java.lang.String.class;
    case "lsnflushtimeoutms":
    case "lsnFlushTimeoutMs": return long.class;
    case "maxbatchsize":
    case "maxBatchSize": return int.class;
    case "maxqueuesize":
    case "maxQueueSize": return int.class;
    case "maxqueuesizeinbytes":
    case "maxQueueSizeInBytes": return long.class;
    case "messagekeycolumns":
    case "messageKeyColumns": return java.lang.String.class;
    case "messageprefixexcludelist":
    case "messagePrefixExcludeList": return java.lang.String.class;
    case "messageprefixincludelist":
    case "messagePrefixIncludeList": return java.lang.String.class;
    case "notificationenabledchannels":
    case "notificationEnabledChannels": return java.lang.String.class;
    case "notificationsinktopicname":
    case "notificationSinkTopicName": return java.lang.String.class;
    case "offsetcommitpolicy":
    case "offsetCommitPolicy": return java.lang.String.class;
    case "offsetcommittimeoutms":
    case "offsetCommitTimeoutMs": return long.class;
    case "offsetflushintervalms":
    case "offsetFlushIntervalMs": return long.class;
    case "offsetstorage":
    case "offsetStorage": return java.lang.String.class;
    case "offsetstoragefilename":
    case "offsetStorageFileName": return java.lang.String.class;
    case "offsetstoragepartitions":
    case "offsetStoragePartitions": return int.class;
    case "offsetstoragereplicationfactor":
    case "offsetStorageReplicationFactor": return int.class;
    case "offsetstoragetopic":
    case "offsetStorageTopic": return java.lang.String.class;
    case "openlineageintegrationconfigfilepath":
    case "openlineageIntegrationConfigFilePath": return java.lang.String.class;
    case "openlineageintegrationdatasetkafkabootstrapservers":
    case "openlineageIntegrationDatasetKafkaBootstrapServers": return java.lang.String.class;
    case "openlineageintegrationenabled":
    case "openlineageIntegrationEnabled": return boolean.class;
    case "openlineageintegrationjobdescription":
    case "openlineageIntegrationJobDescription": return java.lang.String.class;
    case "openlineageintegrationjobnamespace":
    case "openlineageIntegrationJobNamespace": return java.lang.String.class;
    case "openlineageintegrationjobowners":
    case "openlineageIntegrationJobOwners": return java.lang.String.class;
    case "openlineageintegrationjobtags":
    case "openlineageIntegrationJobTags": return java.lang.String.class;
    case "pluginname":
    case "pluginName": return java.lang.String.class;
    case "pollintervalms":
    case "pollIntervalMs": return long.class;
    case "postprocessors":
    case "postProcessors": return java.lang.String.class;
    case "providetransactionmetadata":
    case "provideTransactionMetadata": return boolean.class;
    case "publicationautocreatemode":
    case "publicationAutocreateMode": return java.lang.String.class;
    case "publicationname":
    case "publicationName": return java.lang.String.class;
    case "publishviapartitionroot":
    case "publishViaPartitionRoot": return boolean.class;
    case "queryfetchsize":
    case "queryFetchSize": return int.class;
    case "replicaidentityautosetvalues":
    case "replicaIdentityAutosetValues": return java.lang.String.class;
    case "retriablerestartconnectorwaitms":
    case "retriableRestartConnectorWaitMs": return long.class;
    case "schemaexcludelist":
    case "schemaExcludeList": return java.lang.String.class;
    case "schemahistoryinternalfilefilename":
    case "schemaHistoryInternalFileFilename": return java.lang.String.class;
    case "schemaincludelist":
    case "schemaIncludeList": return java.lang.String.class;
    case "schemanameadjustmentmode":
    case "schemaNameAdjustmentMode": return java.lang.String.class;
    case "schemarefreshmode":
    case "schemaRefreshMode": return java.lang.String.class;
    case "signaldatacollection":
    case "signalDataCollection": return java.lang.String.class;
    case "signalenabledchannels":
    case "signalEnabledChannels": return java.lang.String.class;
    case "signalpollintervalms":
    case "signalPollIntervalMs": return long.class;
    case "skippedoperations":
    case "skippedOperations": return java.lang.String.class;
    case "slotdroponstop":
    case "slotDropOnStop": return boolean.class;
    case "slotfailover":
    case "slotFailover": return boolean.class;
    case "slotmaxretries":
    case "slotMaxRetries": return int.class;
    case "slotname":
    case "slotName": return java.lang.String.class;
    case "slotretrydelayms":
    case "slotRetryDelayMs": return long.class;
    case "slotstreamparams":
    case "slotStreamParams": return java.lang.String.class;
    case "snapshotdelayms":
    case "snapshotDelayMs": return long.class;
    case "snapshotfetchsize":
    case "snapshotFetchSize": return int.class;
    case "snapshotincludecollectionlist":
    case "snapshotIncludeCollectionList": return java.lang.String.class;
    case "snapshotisolationmode":
    case "snapshotIsolationMode": return java.lang.String.class;
    case "snapshotlocktimeoutms":
    case "snapshotLockTimeoutMs": return long.class;
    case "snapshotlockingmode":
    case "snapshotLockingMode": return java.lang.String.class;
    case "snapshotlockingmodecustomname":
    case "snapshotLockingModeCustomName": return java.lang.String.class;
    case "snapshotmaxthreads":
    case "snapshotMaxThreads": return int.class;
    case "snapshotmode":
    case "snapshotMode": return java.lang.String.class;
    case "snapshotmodeconfigurationbasedsnapshotdata":
    case "snapshotModeConfigurationBasedSnapshotData": return boolean.class;
    case "snapshotmodeconfigurationbasedsnapshotondataerror":
    case "snapshotModeConfigurationBasedSnapshotOnDataError": return boolean.class;
    case "snapshotmodeconfigurationbasedsnapshotonschemaerror":
    case "snapshotModeConfigurationBasedSnapshotOnSchemaError": return boolean.class;
    case "snapshotmodeconfigurationbasedsnapshotschema":
    case "snapshotModeConfigurationBasedSnapshotSchema": return boolean.class;
    case "snapshotmodeconfigurationbasedstartstream":
    case "snapshotModeConfigurationBasedStartStream": return boolean.class;
    case "snapshotmodecustomname":
    case "snapshotModeCustomName": return java.lang.String.class;
    case "snapshotquerymode":
    case "snapshotQueryMode": return java.lang.String.class;
    case "snapshotquerymodecustomname":
    case "snapshotQueryModeCustomName": return java.lang.String.class;
    case "snapshotselectstatementoverrides":
    case "snapshotSelectStatementOverrides": return java.lang.String.class;
    case "snapshottablesorderbyrowcount":
    case "snapshotTablesOrderByRowCount": return java.lang.String.class;
    case "sourceinfostructmaker":
    case "sourceinfoStructMaker": return java.lang.String.class;
    case "statusupdateintervalms":
    case "statusUpdateIntervalMs": return int.class;
    case "streamingdelayms":
    case "streamingDelayMs": return long.class;
    case "tableexcludelist":
    case "tableExcludeList": return java.lang.String.class;
    case "tableignorebuiltin":
    case "tableIgnoreBuiltin": return boolean.class;
    case "tableincludelist":
    case "tableIncludeList": return java.lang.String.class;
    case "timeprecisionmode":
    case "timePrecisionMode": return java.lang.String.class;
    case "tombstonesondelete":
    case "tombstonesOnDelete": return boolean.class;
    case "topicnamingstrategy":
    case "topicNamingStrategy": return java.lang.String.class;
    case "topicprefix":
    case "topicPrefix": return java.lang.String.class;
    case "transactionmetadatafactory":
    case "transactionMetadataFactory": return java.lang.String.class;
    case "unavailablevalueplaceholder":
    case "unavailableValuePlaceholder": return java.lang.String.class;
    case "xminfetchintervalms":
    case "xminFetchIntervalMs": return long.class;
    default: return null;
    }
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
DebeziumPostgresEndpoint target = (DebeziumPostgresEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "additionalproperties":
case "additionalProperties": return target.getConfiguration().getAdditionalProperties();
case "binaryhandlingmode":
case "binaryHandlingMode": return target.getConfiguration().getBinaryHandlingMode();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "columnexcludelist":
case "columnExcludeList": return target.getConfiguration().getColumnExcludeList();
case "columnincludelist":
case "columnIncludeList": return target.getConfiguration().getColumnIncludeList();
case "columnpropagatesourcetype":
case "columnPropagateSourceType": return target.getConfiguration().getColumnPropagateSourceType();
case "connectionvalidationtimeoutms":
case "connectionValidationTimeoutMs": return target.getConfiguration().getConnectionValidationTimeoutMs();
case "converters": return target.getConfiguration().getConverters();
case "custommetrictags":
case "customMetricTags": return target.getConfiguration().getCustomMetricTags();
case "databasedbname":
case "databaseDbname": return target.getConfiguration().getDatabaseDbname();
case "databasehostname":
case "databaseHostname": return target.getConfiguration().getDatabaseHostname();
case "databaseinitialstatements":
case "databaseInitialStatements": return target.getConfiguration().getDatabaseInitialStatements();
case "databasepassword":
case "databasePassword": return target.getConfiguration().getDatabasePassword();
case "databaseport":
case "databasePort": return target.getConfiguration().getDatabasePort();
case "databasequerytimeoutms":
case "databaseQueryTimeoutMs": return target.getConfiguration().getDatabaseQueryTimeoutMs();
case "databasesslcert":
case "databaseSslcert": return target.getConfiguration().getDatabaseSslcert();
case "databasesslfactory":
case "databaseSslfactory": return target.getConfiguration().getDatabaseSslfactory();
case "databasesslkey":
case "databaseSslkey": return target.getConfiguration().getDatabaseSslkey();
case "databasesslmode":
case "databaseSslmode": return target.getConfiguration().getDatabaseSslmode();
case "databasesslpassword":
case "databaseSslpassword": return target.getConfiguration().getDatabaseSslpassword();
case "databasesslrootcert":
case "databaseSslrootcert": return target.getConfiguration().getDatabaseSslrootcert();
case "databasetcpkeepalive":
case "databaseTcpkeepalive": return target.getConfiguration().isDatabaseTcpkeepalive();
case "databaseuser":
case "databaseUser": return target.getConfiguration().getDatabaseUser();
case "datatypepropagatesourcetype":
case "datatypePropagateSourceType": return target.getConfiguration().getDatatypePropagateSourceType();
case "decimalhandlingmode":
case "decimalHandlingMode": return target.getConfiguration().getDecimalHandlingMode();
case "errorsmaxretries":
case "errorsMaxRetries": return target.getConfiguration().getErrorsMaxRetries();
case "eventprocessingfailurehandlingmode":
case "eventProcessingFailureHandlingMode": return target.getConfiguration().getEventProcessingFailureHandlingMode();
case "exceptionhandler":
case "exceptionHandler": return target.getExceptionHandler();
case "exchangepattern":
case "exchangePattern": return target.getExchangePattern();
case "executorshutdowntimeoutms":
case "executorShutdownTimeoutMs": return target.getConfiguration().getExecutorShutdownTimeoutMs();
case "extendedheadersenabled":
case "extendedHeadersEnabled": return target.getConfiguration().isExtendedHeadersEnabled();
case "flushlsnsource":
case "flushLsnSource": return target.getConfiguration().isFlushLsnSource();
case "guardrailcollectionslimitaction":
case "guardrailCollectionsLimitAction": return target.getConfiguration().getGuardrailCollectionsLimitAction();
case "guardrailcollectionsmax":
case "guardrailCollectionsMax": return target.getConfiguration().getGuardrailCollectionsMax();
case "heartbeatactionquery":
case "heartbeatActionQuery": return target.getConfiguration().getHeartbeatActionQuery();
case "heartbeatintervalms":
case "heartbeatIntervalMs": return target.getConfiguration().getHeartbeatIntervalMs();
case "heartbeattopicsprefix":
case "heartbeatTopicsPrefix": return target.getConfiguration().getHeartbeatTopicsPrefix();
case "hstorehandlingmode":
case "hstoreHandlingMode": return target.getConfiguration().getHstoreHandlingMode();
case "includeschemacomments":
case "includeSchemaComments": return target.getConfiguration().isIncludeSchemaComments();
case "includeunknowndatatypes":
case "includeUnknownDatatypes": return target.getConfiguration().isIncludeUnknownDatatypes();
case "incrementalsnapshotchunksize":
case "incrementalSnapshotChunkSize": return target.getConfiguration().getIncrementalSnapshotChunkSize();
case "incrementalsnapshotwatermarkingstrategy":
case "incrementalSnapshotWatermarkingStrategy": return target.getConfiguration().getIncrementalSnapshotWatermarkingStrategy();
case "internalkeyconverter":
case "internalKeyConverter": return target.getConfiguration().getInternalKeyConverter();
case "internalvalueconverter":
case "internalValueConverter": return target.getConfiguration().getInternalValueConverter();
case "intervalhandlingmode":
case "intervalHandlingMode": return target.getConfiguration().getIntervalHandlingMode();
case "lsnflushtimeoutaction":
case "lsnFlushTimeoutAction": return target.getConfiguration().getLsnFlushTimeoutAction();
case "lsnflushtimeoutms":
case "lsnFlushTimeoutMs": return target.getConfiguration().getLsnFlushTimeoutMs();
case "maxbatchsize":
case "maxBatchSize": return target.getConfiguration().getMaxBatchSize();
case "maxqueuesize":
case "maxQueueSize": return target.getConfiguration().getMaxQueueSize();
case "maxqueuesizeinbytes":
case "maxQueueSizeInBytes": return target.getConfiguration().getMaxQueueSizeInBytes();
case "messagekeycolumns":
case "messageKeyColumns": return target.getConfiguration().getMessageKeyColumns();
case "messageprefixexcludelist":
case "messagePrefixExcludeList": return target.getConfiguration().getMessagePrefixExcludeList();
case "messageprefixincludelist":
case "messagePrefixIncludeList": return target.getConfiguration().getMessagePrefixIncludeList();
case "notificationenabledchannels":
case "notificationEnabledChannels": return target.getConfiguration().getNotificationEnabledChannels();
case "notificationsinktopicname":
case "notificationSinkTopicName": return target.getConfiguration().getNotificationSinkTopicName();
case "offsetcommitpolicy":
case "offsetCommitPolicy": return target.getConfiguration().getOffsetCommitPolicy();
case "offsetcommittimeoutms":
case "offsetCommitTimeoutMs": return target.getConfiguration().getOffsetCommitTimeoutMs();
case "offsetflushintervalms":
case "offsetFlushIntervalMs": return target.getConfiguration().getOffsetFlushIntervalMs();
case "offsetstorage":
case "offsetStorage": return target.getConfiguration().getOffsetStorage();
case "offsetstoragefilename":
case "offsetStorageFileName": return target.getConfiguration().getOffsetStorageFileName();
case "offsetstoragepartitions":
case "offsetStoragePartitions": return target.getConfiguration().getOffsetStoragePartitions();
case "offsetstoragereplicationfactor":
case "offsetStorageReplicationFactor": return target.getConfiguration().getOffsetStorageReplicationFactor();
case "offsetstoragetopic":
case "offsetStorageTopic": return target.getConfiguration().getOffsetStorageTopic();
case "openlineageintegrationconfigfilepath":
case "openlineageIntegrationConfigFilePath": return target.getConfiguration().getOpenlineageIntegrationConfigFilePath();
case "openlineageintegrationdatasetkafkabootstrapservers":
case "openlineageIntegrationDatasetKafkaBootstrapServers": return target.getConfiguration().getOpenlineageIntegrationDatasetKafkaBootstrapServers();
case "openlineageintegrationenabled":
case "openlineageIntegrationEnabled": return target.getConfiguration().isOpenlineageIntegrationEnabled();
case "openlineageintegrationjobdescription":
case "openlineageIntegrationJobDescription": return target.getConfiguration().getOpenlineageIntegrationJobDescription();
case "openlineageintegrationjobnamespace":
case "openlineageIntegrationJobNamespace": return target.getConfiguration().getOpenlineageIntegrationJobNamespace();
case "openlineageintegrationjobowners":
case "openlineageIntegrationJobOwners": return target.getConfiguration().getOpenlineageIntegrationJobOwners();
case "openlineageintegrationjobtags":
case "openlineageIntegrationJobTags": return target.getConfiguration().getOpenlineageIntegrationJobTags();
case "pluginname":
case "pluginName": return target.getConfiguration().getPluginName();
case "pollintervalms":
case "pollIntervalMs": return target.getConfiguration().getPollIntervalMs();
case "postprocessors":
case "postProcessors": return target.getConfiguration().getPostProcessors();
case "providetransactionmetadata":
case "provideTransactionMetadata": return target.getConfiguration().isProvideTransactionMetadata();
case "publicationautocreatemode":
case "publicationAutocreateMode": return target.getConfiguration().getPublicationAutocreateMode();
case "publicationname":
case "publicationName": return target.getConfiguration().getPublicationName();
case "publishviapartitionroot":
case "publishViaPartitionRoot": return target.getConfiguration().isPublishViaPartitionRoot();
case "queryfetchsize":
case "queryFetchSize": return target.getConfiguration().getQueryFetchSize();
case "replicaidentityautosetvalues":
case "replicaIdentityAutosetValues": return target.getConfiguration().getReplicaIdentityAutosetValues();
case "retriablerestartconnectorwaitms":
case "retriableRestartConnectorWaitMs": return target.getConfiguration().getRetriableRestartConnectorWaitMs();
case "schemaexcludelist":
case "schemaExcludeList": return target.getConfiguration().getSchemaExcludeList();
case "schemahistoryinternalfilefilename":
case "schemaHistoryInternalFileFilename": return target.getConfiguration().getSchemaHistoryInternalFileFilename();
case "schemaincludelist":
case "schemaIncludeList": return target.getConfiguration().getSchemaIncludeList();
case "schemanameadjustmentmode":
case "schemaNameAdjustmentMode": return target.getConfiguration().getSchemaNameAdjustmentMode();
case "schemarefreshmode":
case "schemaRefreshMode": return target.getConfiguration().getSchemaRefreshMode();
case "signaldatacollection":
case "signalDataCollection": return target.getConfiguration().getSignalDataCollection();
case "signalenabledchannels":
case "signalEnabledChannels": return target.getConfiguration().getSignalEnabledChannels();
case "signalpollintervalms":
case "signalPollIntervalMs": return target.getConfiguration().getSignalPollIntervalMs();
case "skippedoperations":
case "skippedOperations": return target.getConfiguration().getSkippedOperations();
case "slotdroponstop":
case "slotDropOnStop": return target.getConfiguration().isSlotDropOnStop();
case "slotfailover":
case "slotFailover": return target.getConfiguration().isSlotFailover();
case "slotmaxretries":
case "slotMaxRetries": return target.getConfiguration().getSlotMaxRetries();
case "slotname":
case "slotName": return target.getConfiguration().getSlotName();
case "slotretrydelayms":
case "slotRetryDelayMs": return target.getConfiguration().getSlotRetryDelayMs();
case "slotstreamparams":
case "slotStreamParams": return target.getConfiguration().getSlotStreamParams();
case "snapshotdelayms":
case "snapshotDelayMs": return target.getConfiguration().getSnapshotDelayMs();
case "snapshotfetchsize":
case "snapshotFetchSize": return target.getConfiguration().getSnapshotFetchSize();
case "snapshotincludecollectionlist":
case "snapshotIncludeCollectionList": return target.getConfiguration().getSnapshotIncludeCollectionList();
case "snapshotisolationmode":
case "snapshotIsolationMode": return target.getConfiguration().getSnapshotIsolationMode();
case "snapshotlocktimeoutms":
case "snapshotLockTimeoutMs": return target.getConfiguration().getSnapshotLockTimeoutMs();
case "snapshotlockingmode":
case "snapshotLockingMode": return target.getConfiguration().getSnapshotLockingMode();
case "snapshotlockingmodecustomname":
case "snapshotLockingModeCustomName": return target.getConfiguration().getSnapshotLockingModeCustomName();
case "snapshotmaxthreads":
case "snapshotMaxThreads": return target.getConfiguration().getSnapshotMaxThreads();
case "snapshotmode":
case "snapshotMode": return target.getConfiguration().getSnapshotMode();
case "snapshotmodeconfigurationbasedsnapshotdata":
case "snapshotModeConfigurationBasedSnapshotData": return target.getConfiguration().isSnapshotModeConfigurationBasedSnapshotData();
case "snapshotmodeconfigurationbasedsnapshotondataerror":
case "snapshotModeConfigurationBasedSnapshotOnDataError": return target.getConfiguration().isSnapshotModeConfigurationBasedSnapshotOnDataError();
case "snapshotmodeconfigurationbasedsnapshotonschemaerror":
case "snapshotModeConfigurationBasedSnapshotOnSchemaError": return target.getConfiguration().isSnapshotModeConfigurationBasedSnapshotOnSchemaError();
case "snapshotmodeconfigurationbasedsnapshotschema":
case "snapshotModeConfigurationBasedSnapshotSchema": return target.getConfiguration().isSnapshotModeConfigurationBasedSnapshotSchema();
case "snapshotmodeconfigurationbasedstartstream":
case "snapshotModeConfigurationBasedStartStream": return target.getConfiguration().isSnapshotModeConfigurationBasedStartStream();
case "snapshotmodecustomname":
case "snapshotModeCustomName": return target.getConfiguration().getSnapshotModeCustomName();
case "snapshotquerymode":
case "snapshotQueryMode": return target.getConfiguration().getSnapshotQueryMode();
case "snapshotquerymodecustomname":
case "snapshotQueryModeCustomName": return target.getConfiguration().getSnapshotQueryModeCustomName();
case "snapshotselectstatementoverrides":
case "snapshotSelectStatementOverrides": return target.getConfiguration().getSnapshotSelectStatementOverrides();
case "snapshottablesorderbyrowcount":
case "snapshotTablesOrderByRowCount": return target.getConfiguration().getSnapshotTablesOrderByRowCount();
case "sourceinfostructmaker":
case "sourceinfoStructMaker": return target.getConfiguration().getSourceinfoStructMaker();
case "statusupdateintervalms":
case "statusUpdateIntervalMs": return target.getConfiguration().getStatusUpdateIntervalMs();
case "streamingdelayms":
case "streamingDelayMs": return target.getConfiguration().getStreamingDelayMs();
case "tableexcludelist":
case "tableExcludeList": return target.getConfiguration().getTableExcludeList();
case "tableignorebuiltin":
case "tableIgnoreBuiltin": return target.getConfiguration().isTableIgnoreBuiltin();
case "tableincludelist":
case "tableIncludeList": return target.getConfiguration().getTableIncludeList();
case "timeprecisionmode":
case "timePrecisionMode": return target.getConfiguration().getTimePrecisionMode();
case "tombstonesondelete":
case "tombstonesOnDelete": return target.getConfiguration().isTombstonesOnDelete();
case "topicnamingstrategy":
case "topicNamingStrategy": return target.getConfiguration().getTopicNamingStrategy();
case "topicprefix":
case "topicPrefix": return target.getConfiguration().getTopicPrefix();
case "transactionmetadatafactory":
case "transactionMetadataFactory": return target.getConfiguration().getTransactionMetadataFactory();
case "unavailablevalueplaceholder":
case "unavailableValuePlaceholder": return target.getConfiguration().getUnavailableValuePlaceholder();
case "xminfetchintervalms":
case "xminFetchIntervalMs": return target.getConfiguration().getXminFetchIntervalMs();
default: return null;
}
}
@Override
public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "additionalproperties":
case "additionalProperties": return java.lang.Object.class;
default: return null;
}
}
}
|
DebeziumPostgresEndpointConfigurer
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/inheritance/discriminator/MultiInheritanceImplicitDowncastTest.java
|
{
"start": 4482,
"end": 5125
}
|
class ____ extends BaseEmbeddable<PolymorphicSub1> {
private static final long serialVersionUID = 1L;
private String someName1;
private PolymorphicSub1 embeddedRelation1;
public Embeddable1() {
}
public String getSomeName1() {
return someName1;
}
public void setSomeName1(String someName1) {
this.someName1 = someName1;
}
@ManyToOne(fetch = FetchType.LAZY)
public PolymorphicSub1 getEmbeddedRelation1() {
return embeddedRelation1;
}
public void setEmbeddedRelation1(PolymorphicSub1 embeddedRelation1) {
this.embeddedRelation1 = embeddedRelation1;
}
}
@Embeddable
public abstract static
|
Embeddable1
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/time/JavaDurationGetSecondsGetNanoTest.java
|
{
"start": 8427,
"end": 9056
}
|
class ____ {
Duration DURATION = Duration.ZERO;
long seconds = DURATION.getSeconds();
Object obj =
new Object() {
// BUG: Diagnostic contains: JavaDurationGetSecondsGetNano
long nanos = DURATION.getNano();
};
}
""")
.doTest();
}
@Test
public void getNanoInMethodGetSecondsInLambda() {
compilationHelper
.addSourceLines(
"test/TestCase.java",
"""
package test;
import java.time.Duration;
public
|
TestCase
|
java
|
apache__kafka
|
clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/expiring/ExpiringCredentialRefreshConfigTest.java
|
{
"start": 1169,
"end": 2288
}
|
class ____ {
@Test
public void fromGoodConfig() {
ExpiringCredentialRefreshConfig expiringCredentialRefreshConfig = new ExpiringCredentialRefreshConfig(
new ConfigDef().withClientSaslSupport().parse(Collections.emptyMap()), true);
assertEquals(Double.valueOf(SaslConfigs.DEFAULT_LOGIN_REFRESH_WINDOW_FACTOR),
Double.valueOf(expiringCredentialRefreshConfig.loginRefreshWindowFactor()));
assertEquals(Double.valueOf(SaslConfigs.DEFAULT_LOGIN_REFRESH_WINDOW_JITTER),
Double.valueOf(expiringCredentialRefreshConfig.loginRefreshWindowJitter()));
assertEquals(Short.valueOf(SaslConfigs.DEFAULT_LOGIN_REFRESH_MIN_PERIOD_SECONDS),
Short.valueOf(expiringCredentialRefreshConfig.loginRefreshMinPeriodSeconds()));
assertEquals(Short.valueOf(SaslConfigs.DEFAULT_LOGIN_REFRESH_BUFFER_SECONDS),
Short.valueOf(expiringCredentialRefreshConfig.loginRefreshBufferSeconds()));
assertTrue(expiringCredentialRefreshConfig.loginRefreshReloginAllowedBeforeLogout());
}
}
|
ExpiringCredentialRefreshConfigTest
|
java
|
quarkusio__quarkus
|
extensions/panache/mongodb-panache-common/runtime/src/main/java/io/quarkus/mongodb/panache/common/jsonb/ObjectIdSerializer.java
|
{
"start": 244,
"end": 506
}
|
class ____ implements JsonbSerializer<ObjectId> {
@Override
public void serialize(ObjectId obj, JsonGenerator generator, SerializationContext ctx) {
if (obj != null) {
generator.write(obj.toString());
}
}
}
|
ObjectIdSerializer
|
java
|
apache__camel
|
components/camel-jira/src/main/java/org/apache/camel/component/jira/oauth/OAuthHttpClientDecorator.java
|
{
"start": 3565,
"end": 4175
}
|
class ____ extends DefaultRequest.DefaultRequestBuilder {
Request.Method method;
OAuthAuthenticatedRequestBuilder() {
super(httpClient);
}
@Override
public ResponsePromise execute(Request.Method method) {
if (authenticationHandler != null) {
this.setMethod(method);
this.method = method;
authenticationHandler.configure(this);
}
return super.execute(method);
}
public URI getUri() {
return uri;
}
}
}
|
OAuthAuthenticatedRequestBuilder
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
|
{
"start": 1571,
"end": 3489
}
|
class ____ extends StorageEvent<SendingRequestEvent> {
private static final String ALLOW_ALL_REQUEST_PRECONDITIONS = "*";
/**
* Hidden default constructor for SendRequestIntercept.
*/
private SendRequestIntercept() {
}
/**
* Binds a new lister to the operation context so the WASB file system can
* appropriately intercept sends and allow concurrent OOB I/Os. This
* by-passes the blob immutability check when reading streams.
*
* @param opContext the operation context assocated with this request.
*/
public static void bind(OperationContext opContext) {
opContext.getSendingRequestEventHandler().addListener(new SendRequestIntercept());
}
/**
* Handler which processes the sending request event from Azure SDK. The
* handler simply sets reset the conditional header to make all read requests
* unconditional if reads with concurrent OOB writes are allowed.
*
* @param sendEvent
* - send event context from Windows Azure SDK.
*/
@Override
public void eventOccurred(SendingRequestEvent sendEvent) {
if (!(sendEvent.getConnectionObject() instanceof HttpURLConnection)) {
// Pass if there is no HTTP connection associated with this send
// request.
return;
}
// Capture the HTTP URL connection object and get size of the payload for
// the request.
HttpURLConnection urlConnection = (HttpURLConnection) sendEvent
.getConnectionObject();
// Determine whether this is a download request by checking that the request
// method
// is a "GET" operation.
if (urlConnection.getRequestMethod().equalsIgnoreCase("GET")) {
// If concurrent reads on OOB writes are allowed, reset the if-match
// condition on the conditional header.
urlConnection.setRequestProperty(HeaderConstants.IF_MATCH,
ALLOW_ALL_REQUEST_PRECONDITIONS);
}
}
}
|
SendRequestIntercept
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/HashicorpVaultEndpointBuilderFactory.java
|
{
"start": 7714,
"end": 10240
}
|
interface ____
extends
EndpointProducerBuilder {
default HashicorpVaultEndpointBuilder basic() {
return (HashicorpVaultEndpointBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedHashicorpVaultEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedHashicorpVaultEndpointBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
}
public
|
AdvancedHashicorpVaultEndpointBuilder
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/iterables/Iterables_assertSatisfiesExactly_Test.java
|
{
"start": 1350,
"end": 6529
}
|
class ____ extends IterablesBaseTest {
private List<String> actual = list("Luke", "Leia", "Yoda");
private Consumer<Object>[] requirements = array(element -> assertThat(element).isNotNull());
@Test
void should_pass_when_each_element_satisfies_its_given_requirements() {
// GIVEN
Consumer<String>[] requirements = array(name -> assertThat(name).isNotBlank(),
name -> assertThat(name).startsWith("Lei"),
name -> assertThat(name).endsWith("da"));
// WHEN/THEN
iterables.assertSatisfiesExactly(info, actual, requirements);
}
@Test
void should_pass_when_both_actual_and_requirements_are_empty() {
// GIVEN
Consumer<String>[] requirements = array();
actual.clear();
// WHEN/THEN
iterables.assertSatisfiesExactly(info, actual, requirements);
}
@Test
void should_fail_when_any_element_is_not_satisfying_its_requirements() {
// GIVEN
Consumer<String>[] requirements = array(name -> assertThat(name).isNotBlank(),
name -> assertThat(name).startsWith("Han"),
name -> assertThat(name).endsWith("da"));
// WHEN
var error = expectAssertionError(() -> iterables.assertSatisfiesExactly(info, actual, requirements));
// THEN
// can't build the exact error message due to internal stack traces
then(error).hasMessageStartingWith(format("%n" +
"Expecting each element of:%n" +
" %s%n" +
"to satisfy the requirements at its index, but these elements did not:%n%n",
info.representation().toStringOf(actual)))
.hasMessageContaining(shouldStartWith("Leia", "Han").create());
}
@Test
void should_fail_when_multiple_elements_are_not_satisfying_their_respective_requirements() {
// GIVEN
Consumer<String>[] requirements = array(name -> assertThat(name).isNotBlank(),
name -> assertThat(name).startsWith("Han"),
name -> assertThat(name).startsWith("Io"));
// WHEN
var error = expectAssertionError(() -> iterables.assertSatisfiesExactly(info, actual, requirements));
// THEN
// can't build the exact error message due to internal stack traces
then(error).hasMessageStartingWith(format("%n" +
"Expecting each element of:%n" +
" %s%n" +
"to satisfy the requirements at its index, but these elements did not:%n%n",
info.representation().toStringOf(actual)))
.hasMessageContaining(shouldStartWith("Leia", "Han").create())
.hasMessageContaining(shouldStartWith("Yoda", "Io").create());
}
@Test
void should_fail_when_requirements_are_met_but_not_in_the_right_order() {
// GIVEN
Consumer<String>[] requirements = array(name -> assertThat(name).isNotBlank(),
name -> assertThat(name).startsWith("Yo"),
name -> assertThat(name).startsWith("Lei"));
// WHEN
// WHEN
var error = expectAssertionError(() -> iterables.assertSatisfiesExactly(info, actual, requirements));
// THEN
// can't build the exact error message due to internal stack traces
then(error).hasMessageStartingWith(format("%n" +
"Expecting each element of:%n" +
" %s%n" +
"to satisfy the requirements at its index, but these elements did not:%n%n",
info.representation().toStringOf(actual)))
.hasMessageContaining(shouldStartWith("Leia", "Yo").create())
.hasMessageContaining(shouldStartWith("Yoda", "Lei").create());
}
@Test
void should_fail_when_actual_and_requirements_have_different_sizes() {
// WHEN
var error = expectAssertionError(() -> iterables.assertSatisfiesExactly(info, actual, requirements));
// THEN
then(error).hasMessage(shouldHaveSameSizeAs(actual, requirements, actual.size(), requirements.length).create());
}
@Test
void should_fail_if_requirements_is_null() {
// GIVEN
Consumer<Object>[] requirements = null;
// WHEN
var error = expectAssertionError(() -> iterables.assertSatisfiesExactly(info, actual, requirements));
// THEN
then(error).hasMessage("%nExpecting an array but was: null".formatted());
}
@Test
void should_fail_when_actual_is_null() {
// GIVEN
List<Object> actual = null;
// WHEN
var assertionError = expectAssertionError(() -> iterables.assertSatisfiesExactly(info, actual, requirements));
// THEN
then(assertionError).hasMessage(actualIsNull());
}
}
|
Iterables_assertSatisfiesExactly_Test
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextUtil.java
|
{
"start": 952,
"end": 1204
}
|
class ____ extends FileContextUtilBase {
@BeforeEach
public void setUp() throws IOException, Exception {
Configuration conf = new Configuration();
fc = S3ATestUtils.createTestFileContext(conf);
super.setUp();
}
}
|
ITestS3AFileContextUtil
|
java
|
playframework__playframework
|
cache/play-ehcache/src/main/java/play/cache/ehcache/EhCacheComponents.java
|
{
"start": 1435,
"end": 2266
}
|
interface ____ extends ConfigurationComponents, PekkoComponents {
Environment environment();
ApplicationLifecycle applicationLifecycle();
default CacheManager ehCacheManager() {
return new CacheManagerProvider(
environment().asScala(), configuration(), applicationLifecycle().asScala())
.get();
}
default AsyncCacheApi cacheApi(String name) {
boolean createNamedCaches = config().getBoolean("play.cache.createBoundCaches");
play.api.cache.AsyncCacheApi scalaAsyncCacheApi =
new EhCacheApi(
NamedEhCacheProvider$.MODULE$.getNamedCache(name, ehCacheManager(), createNamedCaches),
executionContext());
return new DefaultAsyncCacheApi(scalaAsyncCacheApi);
}
default AsyncCacheApi defaultCacheApi() {
return cacheApi("play");
}
}
|
EhCacheComponents
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/skylinestore/api/PredictionSkylineStore.java
|
{
"start": 1307,
"end": 2447
}
|
interface ____ {
/**
* Add job's predicted {@code Resource} allocation to the <em>store</em>
* indexed by the {@code
* pipelineId}.
* <p> Note that right now we only keep the latest copy of predicted
* {@code Resource} allocation for the recurring pipeline.
*
* @param pipelineId the id of the recurring pipeline.
* @param resourceOverTime the predicted {@code Resource} allocation for the
* pipeline.
* @throws SkylineStoreException if input parameters are invalid.
*/
void addEstimation(String pipelineId,
RLESparseResourceAllocation resourceOverTime)
throws SkylineStoreException;
/**
* Return the predicted {@code Resource} allocation for the pipeline.
* <p> If the pipelineId does not exist, it will return <em>null</em>.
*
* @param pipelineId the unique id of the pipeline.
* @return the predicted {@code Resource} allocation for the pipeline.
* @throws SkylineStoreException if pipelineId is <em>null</em>.
*/
RLESparseResourceAllocation getEstimation(String pipelineId)
throws SkylineStoreException;
}
|
PredictionSkylineStore
|
java
|
google__guava
|
android/guava-testlib/test/com/google/common/testing/ArbitraryInstancesTest.java
|
{
"start": 20392,
"end": 20512
}
|
class ____ {
public NoDefaultConstructor(@SuppressWarnings("unused") int i) {}
}
public static
|
NoDefaultConstructor
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/OnNextFailureStrategy.java
|
{
"start": 8457,
"end": 9401
}
|
class ____ implements OnNextFailureStrategy {
private final BiFunction<? super Throwable, Object, ? extends Throwable> delegateProcessor;
private final BiPredicate<? super Throwable, Object> delegatePredicate;
@SuppressWarnings("unchecked")
public LambdaOnNextErrorStrategy(
BiFunction<? super Throwable, Object, ? extends Throwable> delegateProcessor) {
this.delegateProcessor = delegateProcessor;
if (delegateProcessor instanceof BiPredicate) {
this.delegatePredicate = (BiPredicate<? super Throwable, Object>) delegateProcessor;
}
else {
this.delegatePredicate = (e, v) -> true;
}
}
@Override
public boolean test(Throwable error, @Nullable Object value) {
return delegatePredicate.test(error, value);
}
@Override
public @Nullable Throwable process(Throwable error, @Nullable Object value, Context ignored) {
return delegateProcessor.apply(error, value);
}
}
}
|
LambdaOnNextErrorStrategy
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/commons/util/ReflectionUtilsTests.java
|
{
"start": 23819,
"end": 26588
}
|
class ____ {
@SuppressWarnings("DataFlowIssue")
@Test
void isAssignableToForNullClass() {
assertPreconditionViolationFor(() -> ReflectionUtils.isAssignableTo(new Object(), null));
}
@Test
void isAssignableTo() {
// Reference Types
assertTrue(ReflectionUtils.isAssignableTo("string", String.class));
assertTrue(ReflectionUtils.isAssignableTo("string", CharSequence.class));
assertTrue(ReflectionUtils.isAssignableTo("string", Object.class));
assertFalse(ReflectionUtils.isAssignableTo(new Object(), String.class));
assertFalse(ReflectionUtils.isAssignableTo(Integer.valueOf("1"), StringBuilder.class));
assertFalse(ReflectionUtils.isAssignableTo(new StringBuilder(), String.class));
// Arrays
assertTrue(ReflectionUtils.isAssignableTo(new int[0], int[].class));
assertTrue(ReflectionUtils.isAssignableTo(new double[0], Object.class));
assertTrue(ReflectionUtils.isAssignableTo(new String[0], String[].class));
assertTrue(ReflectionUtils.isAssignableTo(new String[0], Object.class));
// Primitive Types
assertTrue(ReflectionUtils.isAssignableTo(1, int.class));
assertTrue(ReflectionUtils.isAssignableTo(Long.valueOf("1"), long.class));
assertTrue(ReflectionUtils.isAssignableTo(Boolean.TRUE, boolean.class));
// Widening Conversions to Primitives
assertTrue(ReflectionUtils.isAssignableTo(1, long.class));
assertTrue(ReflectionUtils.isAssignableTo(1f, double.class));
assertTrue(ReflectionUtils.isAssignableTo((byte) 1, double.class));
// Widening Conversions to Wrappers (not supported by Java)
assertFalse(ReflectionUtils.isAssignableTo(1, Long.class));
assertFalse(ReflectionUtils.isAssignableTo(1f, Double.class));
assertFalse(ReflectionUtils.isAssignableTo((byte) 1, Double.class));
// Narrowing Conversions
assertFalse(ReflectionUtils.isAssignableTo(1, char.class));
assertFalse(ReflectionUtils.isAssignableTo(1L, byte.class));
assertFalse(ReflectionUtils.isAssignableTo(1L, int.class));
}
@Test
void isAssignableToForNullObject() {
assertTrue(ReflectionUtils.isAssignableTo((Object) null, Object.class));
assertTrue(ReflectionUtils.isAssignableTo((Object) null, String.class));
assertTrue(ReflectionUtils.isAssignableTo((Object) null, Long.class));
assertTrue(ReflectionUtils.isAssignableTo((Object) null, Character[].class));
}
@Test
void isAssignableToForNullObjectAndPrimitive() {
assertFalse(ReflectionUtils.isAssignableTo((Object) null, byte.class));
assertFalse(ReflectionUtils.isAssignableTo((Object) null, int.class));
assertFalse(ReflectionUtils.isAssignableTo((Object) null, long.class));
assertFalse(ReflectionUtils.isAssignableTo((Object) null, boolean.class));
}
}
@Nested
|
IsObjectAssignableToClassTests
|
java
|
apache__camel
|
components/camel-thymeleaf/src/test/java/org/apache/camel/component/thymeleaf/ThymeleafDefaultResolverTest.java
|
{
"start": 1561,
"end": 4591
}
|
class ____ extends ThymeleafAbstractBaseTest {
@Test
public void testThymeleaf() throws InterruptedException {
MockEndpoint mock = getMockEndpoint(MOCK_RESULT);
mock.expectedMessageCount(1);
mock.message(0).body().contains(THANK_YOU_FOR_YOUR_ORDER);
mock.message(0).body().endsWith(SPAZZ_TESTING_SERVICE);
mock.message(0).header(ThymeleafConstants.THYMELEAF_TEMPLATE).isNull();
mock.message(0).header(ThymeleafConstants.THYMELEAF_VARIABLE_MAP).isNull();
mock.message(0).header(FIRST_NAME).isEqualTo(JANE);
template.request(DIRECT_START, templateHeaderProcessor);
mock.assertIsSatisfied();
ThymeleafEndpoint thymeleafEndpoint = context.getEndpoint(
"thymeleaf:dontcare?allowTemplateFromHeader=true&templateMode=HTML&allowContextMapAll=true&resolver=DEFAULT",
ThymeleafEndpoint.class);
assertAll("properties",
() -> assertNotNull(thymeleafEndpoint),
() -> assertTrue(thymeleafEndpoint.isAllowContextMapAll()),
() -> assertNull(thymeleafEndpoint.getCacheable()),
() -> assertNull(thymeleafEndpoint.getCacheTimeToLive()),
() -> assertNull(thymeleafEndpoint.getCheckExistence()),
() -> assertNull(thymeleafEndpoint.getEncoding()),
() -> assertEquals(ExchangePattern.InOut, thymeleafEndpoint.getExchangePattern()),
() -> assertNull(thymeleafEndpoint.getOrder()),
() -> assertNull(thymeleafEndpoint.getPrefix()),
() -> assertEquals(ThymeleafResolverType.DEFAULT, thymeleafEndpoint.getResolver()),
() -> assertNull(thymeleafEndpoint.getSuffix()),
() -> assertNotNull(thymeleafEndpoint.getTemplateEngine()),
() -> assertEquals(HTML, thymeleafEndpoint.getTemplateMode()));
assertEquals(1, thymeleafEndpoint.getTemplateEngine().getTemplateResolvers().size());
ITemplateResolver resolver = thymeleafEndpoint.getTemplateEngine().getTemplateResolvers().stream().findFirst().get();
assertTrue(resolver instanceof DefaultTemplateResolver);
DefaultTemplateResolver templateResolver = (DefaultTemplateResolver) resolver;
assertAll("templateResolver",
() -> assertFalse(templateResolver.getCheckExistence()),
() -> assertNull(templateResolver.getOrder()),
() -> assertEquals(TemplateMode.HTML, templateResolver.getTemplateMode()));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(DIRECT_START)
.setBody(simple(SPAZZ_TESTING_SERVICE))
.to("thymeleaf:dontcare?allowTemplateFromHeader=true&templateMode=HTML&allowContextMapAll=true&resolver=DEFAULT")
.to(MOCK_RESULT);
}
};
}
}
|
ThymeleafDefaultResolverTest
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestPrometheusMetricsSink.java
|
{
"start": 9372,
"end": 9755
}
|
class ____ {
private String id;
TestMetrics() {
this("1");
}
TestMetrics(String id) {
this.id = id;
}
@Metric(value={"testTag", ""}, type=Type.TAG)
String testTag1() {
return "testTagValue" + id;
}
@Metric
private MutableCounterLong numBucketCreateFails;
}
/**
* Example metric TopMetrics.
*/
private
|
TestMetrics
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/fastjson/deserializer/issues3796/bean/ObjectL_B.java
|
{
"start": 95,
"end": 579
}
|
class ____ {
private int a;
private List<Integer> b;
private List<Integer> c;
private long d;
public int getA() {
return a;
}
public void setA(int a) {
this.a = a;
}
public List<Integer> getB() {
return b;
}
public void setB(List<Integer> b) {
this.b = b;
}
public List<Integer> getC() {
return c;
}
public void setC(List<Integer> c) {
this.c = c;
}
public long getD() {
return d;
}
public void setD(long d) {
this.d = d;
}
}
|
ObjectL_B
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateReleasingTests.java
|
{
"start": 1148,
"end": 4641
}
|
class ____ extends ESTestCase {
@ParametersFactory
public static Iterable<Object[]> parameters() {
return Arrays.stream(TDigestState.Type.values()).map(type -> new Object[] { type }).toList();
}
private final TDigestState.Type digestType;
public TDigestStateReleasingTests(TDigestState.Type digestType) {
this.digestType = digestType;
}
public void testCreateOfType() {
testCircuitBreakerTrip(circuitBreaker -> TDigestState.createOfType(circuitBreaker, digestType, 100));
}
public void testCreateUsingParamsFrom() {
testCircuitBreakerTrip(circuitBreaker -> {
try (TDigestState example = TDigestState.createOfType(newLimitedBreaker(ByteSizeValue.ofMb(100)), digestType, 100)) {
return TDigestState.createUsingParamsFrom(example);
}
});
}
/**
* This test doesn't use the {@code digestType} param.
*/
public void testCreate() {
testCircuitBreakerTrip(circuitBreaker -> TDigestState.create(circuitBreaker, 100));
}
/**
* This test doesn't use the {@code digestType} param.
*/
public void testCreateOptimizedForAccuracy() {
testCircuitBreakerTrip(circuitBreaker -> TDigestState.createOptimizedForAccuracy(circuitBreaker, 100));
}
public void testRead() throws IOException {
try (
TDigestState state = TDigestState.createOfType(newLimitedBreaker(ByteSizeValue.ofMb(100)), digestType, 100);
BytesStreamOutput output = new BytesStreamOutput()
) {
TDigestState.write(state, output);
testCircuitBreakerTrip(circuitBreaker -> {
try (StreamInput input = output.bytes().streamInput()) {
return TDigestState.read(circuitBreaker, input);
}
});
}
}
public void testReadWithData() throws IOException {
try (
TDigestState state = TDigestState.createOfType(newLimitedBreaker(ByteSizeValue.ofMb(100)), digestType, 100);
BytesStreamOutput output = new BytesStreamOutput()
) {
for (int i = 0; i < 1000; i++) {
state.add(randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true));
}
TDigestState.write(state, output);
testCircuitBreakerTrip(circuitBreaker -> {
try (StreamInput input = output.bytes().streamInput()) {
return TDigestState.read(circuitBreaker, input);
}
});
}
}
/**
* Tests that a circuit breaker trip leaves no unreleased memory.
*/
public <E extends Exception> void testCircuitBreakerTrip(CheckedFunction<CircuitBreaker, TDigestState, E> tDigestStateFactory)
throws E {
CrankyCircuitBreakerService circuitBreakerService = new CrankyCircuitBreakerService();
CircuitBreaker breaker = circuitBreakerService.getBreaker("test");
try (TDigestState state = tDigestStateFactory.apply(breaker)) {
// Add some data to make it trip. It won't work in all digest types
for (int i = 0; i < 10; i++) {
state.add(randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true));
}
} catch (CircuitBreakingException e) {
// Expected
} finally {
assertThat("unreleased bytes", breaker.getUsed(), equalTo(0L));
}
}
}
|
TDigestStateReleasingTests
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ThreadSafeCheckerTest.java
|
{
"start": 5414,
"end": 5956
}
|
class ____ implements Test {
public Object[] xs = {};
public Class<? extends Annotation> annotationType() {
return null;
}
}
""")
.doTest();
}
@Test
public void annotationsDefaultToImmutable() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.annotations.ThreadSafe;
import javax.lang.model.element.ElementKind;
@ThreadSafe
|
MyTest
|
java
|
quarkusio__quarkus
|
independent-projects/arc/runtime/src/test/java/io/quarkus/arc/impl/WildcardTypeImplTest.java
|
{
"start": 199,
"end": 912
}
|
class ____ {
@Test
public void testEqualsAndHashCode() {
assertEquals(WildcardTypeImpl.defaultInstance(), WildcardTypeImpl.withUpperBound(Object.class));
assertEquals(WildcardTypeImpl.withLowerBound(String.class), WildcardTypeImpl.withLowerBound(String.class));
assertNotEquals(WildcardTypeImpl.withLowerBound(String.class), WildcardTypeImpl.withLowerBound(Integer.class));
assertEquals(WildcardTypeImpl.defaultInstance().hashCode(), WildcardTypeImpl.withUpperBound(Object.class).hashCode());
assertEquals(WildcardTypeImpl.withLowerBound(String.class).hashCode(),
WildcardTypeImpl.withLowerBound(String.class).hashCode());
}
}
|
WildcardTypeImplTest
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/base/BenchmarkHelpers.java
|
{
"start": 1984,
"end": 3140
}
|
enum ____ {
WHITESPACE(CharMatcher.whitespace(), WHITESPACE_CHARACTERS),
HASH(CharMatcher.is('#'), "#"),
ASCII(CharMatcher.ascii(), ASCII_CHARACTERS),
WESTERN_DIGIT("0123456789"),
ALL_DIGIT(CharMatcher.digit(), ALL_DIGITS),
OPS_5("+-*/%"),
HEX_16(CharMatcher.inRange('0', '9').or(CharMatcher.inRange('A', 'F')), "0123456789ABCDEF"),
HEX_22(
CharMatcher.inRange('0', '9')
.or(CharMatcher.inRange('A', 'F'))
.or(CharMatcher.inRange('a', 'f')),
"0123456789ABCDEFabcdef"),
GERMAN_59(
CharMatcher.inRange('a', 'z')
.or(CharMatcher.inRange('A', 'Z'))
.or(CharMatcher.anyOf("äöüßÄÖÜ")),
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZäöüßÄÖÜ");
public final CharMatcher matcher;
public final String matchingChars;
SampleMatcherConfig(String matchingChars) {
this(CharMatcher.anyOf(matchingChars), matchingChars);
}
SampleMatcherConfig(CharMatcher matcher, String matchingChars) {
this.matcher = matcher;
this.matchingChars = matchingChars;
}
}
private BenchmarkHelpers() {}
}
|
SampleMatcherConfig
|
java
|
netty__netty
|
testsuite/src/main/java/io/netty/testsuite/transport/socket/SocketBufReleaseTest.java
|
{
"start": 2869,
"end": 4316
}
|
class ____ extends SimpleChannelInboundHandler<Object> {
private final Random random = new Random();
private final CountDownLatch latch = new CountDownLatch(1);
private ByteBuf buf;
private final Promise<Channel> channelFuture = new DefaultPromise<Channel>(executor);
@Override
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
channelFuture.setSuccess(ctx.channel());
}
@Override
public void channelActive(final ChannelHandlerContext ctx) throws Exception {
byte[] data = new byte[1024];
random.nextBytes(data);
buf = ctx.alloc().buffer();
// call retain on it so it can't be put back on the pool
buf.writeBytes(data).retain();
ctx.channel().writeAndFlush(buf).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
latch.countDown();
}
});
}
@Override
public void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
// discard
}
public void check() throws InterruptedException {
latch.await();
assertEquals(1, buf.refCnt());
}
void release() {
buf.release();
}
}
}
|
BufWriterHandler
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableRefCountTest.java
|
{
"start": 28764,
"end": 29409
}
|
class ____ extends ConnectableFlowable<Object> {
@Override
public void connect(Consumer<? super Disposable> connection) {
try {
connection.accept(Disposable.empty());
} catch (Throwable ex) {
throw ExceptionHelper.wrapOrThrow(ex);
}
}
@Override
public void reset() {
throw new TestException("dispose");
}
@Override
protected void subscribeActual(Subscriber<? super Object> subscriber) {
subscriber.onSubscribe(new BooleanSubscription());
}
}
static final
|
BadFlowableDispose
|
java
|
apache__camel
|
components/camel-ehcache/src/generated/java/org/apache/camel/component/ehcache/EhcacheProducerInvokeOnHeaderFactory.java
|
{
"start": 397,
"end": 1708
}
|
class ____ implements InvokeOnHeaderStrategy {
@Override
public Object invoke(Object obj, String key, Exchange exchange, AsyncCallback callback) throws Exception {
org.apache.camel.component.ehcache.EhcacheProducer target = (org.apache.camel.component.ehcache.EhcacheProducer) obj;
switch (key) {
case "clear":
case "CLEAR": target.onClear(exchange.getMessage()); return null;
case "get":
case "GET": target.onGet(exchange.getMessage()); return null;
case "get_all":
case "GET_ALL": target.onGetAll(exchange.getMessage()); return null;
case "put":
case "PUT": target.onPut(exchange.getMessage()); return null;
case "put_all":
case "PUT_ALL": target.onPutAll(exchange.getMessage()); return null;
case "put_if_absent":
case "PUT_IF_ABSENT": target.onPutIfAbsent(exchange.getMessage()); return null;
case "remove":
case "REMOVE": target.onRemove(exchange.getMessage()); return null;
case "remove_all":
case "REMOVE_ALL": target.onRemoveAll(exchange.getMessage()); return null;
case "replace":
case "REPLACE": target.onReplace(exchange.getMessage()); return null;
default: return null;
}
}
}
|
EhcacheProducerInvokeOnHeaderFactory
|
java
|
FasterXML__jackson-databind
|
src/test/java/perf/ManualWritePerfWithAllTypes.java
|
{
"start": 1558,
"end": 4178
}
|
enum ____ {
FOO, BAR;
}
protected String ignoreMe;
public String ignoreMe2;
public String ignoreMe3;
public int myInt;
public boolean myBoolean;
public short myShort;
public long myLong;
public String string;
public String string2;
public BigDecimal bigDecimal;
public BigInteger bigInteger;
public Date date;
public float myFloat;
public double myDouble;
public byte myByte;
public FooEnum foo;
public FooEnum bar;
public long someDate = new Date().getTime ();
public AllTypes allType;
public List<AllTypes> allTypes = new ArrayList<AllTypes>();
static AllTypes _small() {
AllTypes small = new AllTypes();
small.ignoreMe = "THIS WILL NOT PASS";
small.ignoreMe2 = "THIS WILL NOT PASS EITHER";
small.ignoreMe3 = "THIS WILL NOT PASS TOO";
small.bigDecimal = new BigDecimal("1.235678900");
small.date = new Date();
small.bar = FooEnum.BAR;
small.foo = FooEnum.FOO;
small.string = "Hi Mom";
small.myDouble = 1.2345d;
small.myFloat = 1.0f;
small.myShort = (short)1;
small.myByte = (byte)1;
return small;
}
public static AllTypes smallObject() {
AllTypes small = _small();
small.allType = _small();
small.allType.string = "Hi Dad";
small.allTypes = Arrays.asList(_small(), _small());
return small;
}
public static AllTypes bigObject() {
AllTypes big = new AllTypes();
final List<AllTypes> list = new ArrayList<AllTypes>();
for (int index = 0; index < 10000; index++) {
AllTypes item = new AllTypes();
item.ignoreMe = "THIS WILL NOT PASS";
item.ignoreMe2 = "THIS WILL NOT PASS EITHER";
item.ignoreMe3 = "THIS WILL NOT PASS TOO";
item.bigDecimal = new BigDecimal("1.235678900");
item.date = new Date ();
item.bar = FooEnum.BAR;
item.foo = FooEnum.FOO;
item.string = "Hi Mom" + System.currentTimeMillis();
item.myDouble = 1.2345d;
item.myFloat = 1.0f;
item.myShort = (short)1;
item.myByte = (byte)1;
list.add(item);
}
big.allTypes = list;
return big;
}
}
}
|
FooEnum
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/RouteStopTest.java
|
{
"start": 1009,
"end": 3282
}
|
class ____ extends ContextTestSupport {
@Test
public void testOtherwise() throws Exception {
getMockEndpoint("mock:hello").expectedMessageCount(0);
getMockEndpoint("mock:bye").expectedMessageCount(0);
getMockEndpoint("mock:other").expectedMessageCount(1);
getMockEndpoint("mock:result").expectedMessageCount(1);
template.sendBody("direct:start", "Other");
assertMockEndpointsSatisfied();
}
@Test
public void testHello() throws Exception {
getMockEndpoint("mock:hello").expectedMessageCount(1);
getMockEndpoint("mock:bye").expectedMessageCount(0);
getMockEndpoint("mock:other").expectedMessageCount(0);
getMockEndpoint("mock:result").expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Test
public void testByeWithStop() throws Exception {
getMockEndpoint("mock:hello").expectedMessageCount(0);
getMockEndpoint("mock:bye").expectedMessageCount(1);
getMockEndpoint("mock:other").expectedMessageCount(0);
// we should stop so no message arrives at result
getMockEndpoint("mock:result").expectedMessageCount(0);
template.sendBody("direct:start", "Bye World");
assertMockEndpointsSatisfied();
}
@Test
public void testSetPropertyToStop() throws Exception {
getMockEndpoint("mock:foo").expectedMessageCount(1);
getMockEndpoint("mock:result").expectedMessageCount(0);
template.sendBody("direct:foo", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// START SNIPPET: e1
from("direct:start").choice().when(body().contains("Hello")).to("mock:hello").when(body().contains("Bye"))
.to("mock:bye").stop().otherwise().to("mock:other").end()
.to("mock:result");
// END SNIPPET: e1
from("direct:foo").to("mock:foo").process(e -> e.setRouteStop(true)).to("mock:result");
}
};
}
}
|
RouteStopTest
|
java
|
netty__netty
|
codec-base/src/main/java/io/netty/handler/codec/DecoderResult.java
|
{
"start": 748,
"end": 2310
}
|
class ____ {
protected static final Signal SIGNAL_UNFINISHED = Signal.valueOf(DecoderResult.class, "UNFINISHED");
protected static final Signal SIGNAL_SUCCESS = Signal.valueOf(DecoderResult.class, "SUCCESS");
public static final DecoderResult UNFINISHED = new DecoderResult(SIGNAL_UNFINISHED);
public static final DecoderResult SUCCESS = new DecoderResult(SIGNAL_SUCCESS);
public static DecoderResult failure(Throwable cause) {
return new DecoderResult(ObjectUtil.checkNotNull(cause, "cause"));
}
private final Throwable cause;
protected DecoderResult(Throwable cause) {
this.cause = ObjectUtil.checkNotNull(cause, "cause");
}
public boolean isFinished() {
return cause != SIGNAL_UNFINISHED;
}
public boolean isSuccess() {
return cause == SIGNAL_SUCCESS;
}
public boolean isFailure() {
return cause != SIGNAL_SUCCESS && cause != SIGNAL_UNFINISHED;
}
public Throwable cause() {
if (isFailure()) {
return cause;
} else {
return null;
}
}
@Override
public String toString() {
if (isFinished()) {
if (isSuccess()) {
return "success";
}
String cause = cause().toString();
return new StringBuilder(cause.length() + 17)
.append("failure(")
.append(cause)
.append(')')
.toString();
} else {
return "unfinished";
}
}
}
|
DecoderResult
|
java
|
apache__camel
|
dsl/camel-java-joor-dsl/src/test/resources/routes/MyEcho.java
|
{
"start": 877,
"end": 958
}
|
class ____ {
public String echo(String s) {
return s + s;
}
}
|
MyEcho
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.