language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/config/ConfigBeanDefinitionParser.java | {
"start": 2636,
"end": 5502
} | class ____ implements BeanDefinitionParser {
private static final String ASPECT = "aspect";
private static final String EXPRESSION = "expression";
private static final String ID = "id";
private static final String POINTCUT = "pointcut";
private static final String ADVICE_BEAN_NAME = "adviceBeanName";
private static final String ADVISOR = "advisor";
private static final String ADVICE_REF = "advice-ref";
private static final String POINTCUT_REF = "pointcut-ref";
private static final String REF = "ref";
private static final String BEFORE = "before";
private static final String DECLARE_PARENTS = "declare-parents";
private static final String TYPE_PATTERN = "types-matching";
private static final String DEFAULT_IMPL = "default-impl";
private static final String DELEGATE_REF = "delegate-ref";
private static final String IMPLEMENT_INTERFACE = "implement-interface";
private static final String AFTER = "after";
private static final String AFTER_RETURNING_ELEMENT = "after-returning";
private static final String AFTER_THROWING_ELEMENT = "after-throwing";
private static final String AROUND = "around";
private static final String RETURNING = "returning";
private static final String RETURNING_PROPERTY = "returningName";
private static final String THROWING = "throwing";
private static final String THROWING_PROPERTY = "throwingName";
private static final String ARG_NAMES = "arg-names";
private static final String ARG_NAMES_PROPERTY = "argumentNames";
private static final String ASPECT_NAME_PROPERTY = "aspectName";
private static final String DECLARATION_ORDER_PROPERTY = "declarationOrder";
private static final String ORDER_PROPERTY = "order";
private static final int METHOD_INDEX = 0;
private static final int POINTCUT_INDEX = 1;
private static final int ASPECT_INSTANCE_FACTORY_INDEX = 2;
private final ParseState parseState = new ParseState();
@Override
public @Nullable BeanDefinition parse(Element element, ParserContext parserContext) {
CompositeComponentDefinition compositeDef =
new CompositeComponentDefinition(element.getTagName(), parserContext.extractSource(element));
parserContext.pushContainingComponent(compositeDef);
configureAutoProxyCreator(parserContext, element);
List<Element> childElts = DomUtils.getChildElements(element);
for (Element elt: childElts) {
String localName = parserContext.getDelegate().getLocalName(elt);
switch (localName) {
case POINTCUT -> parsePointcut(elt, parserContext);
case ADVISOR -> parseAdvisor(elt, parserContext);
case ASPECT -> parseAspect(elt, parserContext);
}
}
parserContext.popAndRegisterContainingComponent();
return null;
}
/**
* Configures the auto proxy creator needed to support the {@link BeanDefinition BeanDefinitions}
* created by the '{@code <aop:config/>}' tag. Will force | ConfigBeanDefinitionParser |
java | apache__camel | components/camel-saxon/src/test/java/org/apache/camel/component/xquery/TestBean.java | {
"start": 854,
"end": 1070
} | class ____ {
private String subject;
public void onMethod(@XQuery("/*/@subject") String subject) {
this.subject = subject;
}
public String getSubject() {
return subject;
}
}
| TestBean |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ComparisonOutOfRangeTest.java | {
"start": 1495,
"end": 6235
} | class ____ {
private static final int NOT_A_BYTE = 255;
void byteEquality() {
boolean result;
byte b = 0;
byte[] barr = {1, 2, 3};
// BUG: Diagnostic contains: b == -1
result = b == 255;
// BUG: Diagnostic contains: b == 1
result = b == -255;
// BUG: Diagnostic contains: b == -128
result = b == 128;
// BUG: Diagnostic contains: b != -1
result = b != 255;
// BUG: Diagnostic contains: barr[0] == -1
result = barr[0] == 255;
// BUG: Diagnostic contains:
result = barr[0] == 128;
// BUG: Diagnostic contains: bytes
result = barr[0] == -255;
// BUG: Diagnostic contains: b == -1
result = b == NOT_A_BYTE;
Byte boxed = 0;
// BUG: Diagnostic contains:
result = boxed == 255;
Supplier<? extends Byte> bSupplier = null;
// BUG: Diagnostic contains:
result = bSupplier.get() == 255;
}
void charEquality() throws IOException {
boolean result;
char c = 'A';
Reader reader = null;
// BUG: Diagnostic contains: false
result = c == -1;
// BUG: Diagnostic contains: true
result = c != -1;
char d;
// BUG: Diagnostic contains: chars
result = (d = (char) reader.read()) == -1;
}
void shorts(short s) {
boolean result;
// BUG: Diagnostic contains: false
result = s == Short.MAX_VALUE + 1;
// BUG: Diagnostic contains: false
result = s == Short.MIN_VALUE - 1;
// BUG: Diagnostic contains: true
result = s != Short.MAX_VALUE + 1;
// BUG: Diagnostic contains: true
result = s != Short.MIN_VALUE - 1;
// BUG: Diagnostic contains: false
result = s > Short.MAX_VALUE;
// BUG: Diagnostic contains: true
result = s > Short.MIN_VALUE - 1;
// BUG: Diagnostic contains: false
result = s >= Short.MAX_VALUE + 1;
// BUG: Diagnostic contains: true
result = s >= Short.MIN_VALUE;
// BUG: Diagnostic contains: false
result = s < Short.MIN_VALUE;
// BUG: Diagnostic contains: true
result = s < Short.MAX_VALUE + 1;
// BUG: Diagnostic contains: false
result = s <= Short.MIN_VALUE - 1;
// BUG: Diagnostic contains: true
result = s <= Short.MAX_VALUE;
}
void shortsReversed(short s) {
boolean result;
// BUG: Diagnostic contains: false
result = Short.MAX_VALUE < s;
// BUG: Diagnostic contains: true
result = Short.MIN_VALUE - 1 < s;
// BUG: Diagnostic contains: false
result = Short.MAX_VALUE + 1 <= s;
// BUG: Diagnostic contains: true
result = Short.MIN_VALUE <= s;
// BUG: Diagnostic contains: false
result = Short.MIN_VALUE > s;
// BUG: Diagnostic contains: true
result = Short.MAX_VALUE + 1 > s;
// BUG: Diagnostic contains: false
result = Short.MIN_VALUE - 1 >= s;
// BUG: Diagnostic contains: true
result = Short.MAX_VALUE >= s;
}
void ints(int i) {
boolean result;
// BUG: Diagnostic contains: false
result = i == Integer.MAX_VALUE + 1L;
}
void longs(long l) {
boolean result;
// BUG: Diagnostic contains: false
result = l == Long.MIN_VALUE * 2.0;
}
}\
""")
.doTest();
}
@Test
public void negativeCases() {
compilationHelper
.addSourceLines(
"ComparisonOutOfRangeNegativeCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import java.io.IOException;
import java.io.Reader;
/**
* @author Bill Pugh (bill.pugh@gmail.com)
*/
| ComparisonOutOfRangePositiveCases |
java | elastic__elasticsearch | modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java | {
"start": 53130,
"end": 54449
} | class ____ extends ParserRuleContext {
public TerminalNode SEMICOLON() {
return getToken(PainlessParser.SEMICOLON, 0);
}
public EmptyContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override
public int getRuleIndex() {
return RULE_empty;
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if (visitor instanceof PainlessParserVisitor) return ((PainlessParserVisitor<? extends T>) visitor).visitEmpty(this);
else return visitor.visitChildren(this);
}
}
public final EmptyContext empty() throws RecognitionException {
EmptyContext _localctx = new EmptyContext(_ctx, getState());
enterRule(_localctx, 16, RULE_empty);
try {
enterOuterAlt(_localctx, 1);
{
setState(216);
match(SEMICOLON);
}
} catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
} finally {
exitRule();
}
return _localctx;
}
@SuppressWarnings("CheckReturnValue")
public static | EmptyContext |
java | apache__logging-log4j2 | log4j-jpa/src/main/java/org/apache/logging/log4j/core/appender/db/jpa/BasicLogEventEntity.java | {
"start": 10267,
"end": 10443
} | class ____ of the caller of the logger API.
*/
@Override
@Basic
public String getLoggerFqcn() {
return this.getWrappedEvent().getLoggerFqcn();
}
}
| name |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/web/service/invoker/HttpServiceMethodTests.java | {
"start": 12229,
"end": 12775
} | interface ____ {
@GetExchange
void performGet();
@PostExchange(url = "/url", contentType = APPLICATION_JSON_VALUE, accept = APPLICATION_JSON_VALUE)
void performPost();
@HttpExchange(
method = "GET",
contentType = APPLICATION_JSON_VALUE,
headers = {"CustomHeader=a,b, c", "Content-Type=" + APPLICATION_NDJSON_VALUE})
void performGetWithHeaders();
}
@SuppressWarnings("unused")
@HttpExchange(url = "${baseUrl}", contentType = APPLICATION_CBOR_VALUE, accept = APPLICATION_CBOR_VALUE)
private | MethodLevelAnnotatedService |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/RouteStartupOrderSuspendResumeTest.java | {
"start": 1209,
"end": 2759
} | class ____ extends ContextTestSupport {
@Test
public void testRouteStartupOrderSuspendResume() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
context.suspend();
context.resume();
// assert correct order
DefaultCamelContext dcc = (DefaultCamelContext) context;
List<RouteStartupOrder> order = dcc.getCamelContextExtension().getRouteStartupOrder();
assertEquals(4, order.size());
assertEquals("direct://foo", order.get(0).getRoute().getEndpoint().getEndpointUri());
assertEquals("direct://start", order.get(1).getRoute().getEndpoint().getEndpointUri());
assertEquals("direct://baz", order.get(2).getRoute().getEndpoint().getEndpointUri());
assertEquals("direct://bar", order.get(3).getRoute().getEndpoint().getEndpointUri());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").routeId("B").startupOrder(2).to("direct:foo");
from("direct:foo").routeId("A").startupOrder(1).to("mock:result");
from("direct:bar").routeId("D").startupOrder(9).to("direct:bar");
from("direct:baz").routeId("C").startupOrder(5).to("mock:other");
}
};
}
}
| RouteStartupOrderSuspendResumeTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/suppress/CustomSuppressionTest.java | {
"start": 3253,
"end": 3771
} | class ____ {
@SuppressBothCheckers
int identity(int value) {
return value;
}
}
""")
.doTest();
}
@Test
public void myCheckerIsSuppressedWithCustomAnnotationAtLocalVariableScope() {
CompilationTestHelper.newInstance(MyChecker.class, getClass())
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.suppress.CustomSuppressionTest.SuppressBothCheckers;
| Test |
java | resilience4j__resilience4j | resilience4j-retry/src/test/java/io/github/resilience4j/retry/event/RetryEventTest.java | {
"start": 877,
"end": 5464
} | class ____ {
@Test
public void testRetryOnErrorEvent() {
RetryOnErrorEvent retryOnErrorEvent = new RetryOnErrorEvent("test", 2,
new IOException("Bla"));
assertThat(retryOnErrorEvent.getName()).isEqualTo("test");
assertThat(retryOnErrorEvent.getNumberOfRetryAttempts()).isEqualTo(2);
assertThat(retryOnErrorEvent.getEventType()).isEqualTo(Type.ERROR);
assertThat(retryOnErrorEvent.getLastThrowable()).isInstanceOf(IOException.class);
assertThat(retryOnErrorEvent.toString()).contains(
"Retry 'test' recorded a failed retry attempt. Number of retry attempts: '2'. Giving up. Last exception was: 'java.io.IOException: Bla'.");
}
@Test
public void testRetryOnErrorEventWithNullLastThrowable() {
RetryOnErrorEvent retryOnErrorEvent = new RetryOnErrorEvent("test", 2, null);
assertThat(retryOnErrorEvent.getLastThrowable()).isNull();
assertThat(retryOnErrorEvent.toString()).contains(
"Retry 'test' recorded a failed retry attempt. Number of retry attempts: '2'. Giving up. Last exception was: 'null'.");
}
@Test
public void testRetryOnSuccessEvent() {
RetryOnSuccessEvent retryOnSuccessEvent = new RetryOnSuccessEvent("test", 2,
new IOException("Bla"));
assertThat(retryOnSuccessEvent.getName()).isEqualTo("test");
assertThat(retryOnSuccessEvent.getNumberOfRetryAttempts()).isEqualTo(2);
assertThat(retryOnSuccessEvent.getEventType()).isEqualTo(Type.SUCCESS);
assertThat(retryOnSuccessEvent.getLastThrowable()).isInstanceOf(IOException.class);
assertThat(retryOnSuccessEvent.toString()).contains(
"Retry 'test' recorded a successful retry attempt. Number of retry attempts: '2', Last exception was: 'java.io.IOException: Bla'.");
}
@Test
public void testRetryOnSuccessEventWithNullLastThrowable() {
RetryOnSuccessEvent retryOnSuccessEvent = new RetryOnSuccessEvent("test", 2, null);
assertThat(retryOnSuccessEvent.getLastThrowable()).isNull();
assertThat(retryOnSuccessEvent.toString()).contains(
"Retry 'test' recorded a successful retry attempt. Number of retry attempts: '2', Last exception was: 'null'.");
}
@Test
public void testRetryOnIgnoredErrorEvent() {
RetryOnIgnoredErrorEvent retryOnIgnoredErrorEvent = new RetryOnIgnoredErrorEvent("test",
new IOException("Bla"));
assertThat(retryOnIgnoredErrorEvent.getName()).isEqualTo("test");
assertThat(retryOnIgnoredErrorEvent.getNumberOfRetryAttempts()).isZero();
assertThat(retryOnIgnoredErrorEvent.getEventType()).isEqualTo(Type.IGNORED_ERROR);
assertThat(retryOnIgnoredErrorEvent.getLastThrowable()).isInstanceOf(IOException.class);
assertThat(retryOnIgnoredErrorEvent.toString()).contains(
"Retry 'test' recorded an error which has been ignored: 'java.io.IOException: Bla'.");
}
@Test
public void testRetryOnIgnoredErrorEventWithNullLastThrowable() {
RetryOnIgnoredErrorEvent retryOnIgnoredErrorEvent = new RetryOnIgnoredErrorEvent("test", null);
assertThat(retryOnIgnoredErrorEvent.getLastThrowable()).isNull();
assertThat(retryOnIgnoredErrorEvent.toString()).contains(
"Retry 'test' recorded an error which has been ignored: 'null'.");
}
@Test
public void testRetryOnRetryEvent() {
RetryOnRetryEvent retryOnRetryEvent = new RetryOnRetryEvent("test", 2,
new IOException("Bla"), 1234L);
assertThat(retryOnRetryEvent.getName()).isEqualTo("test");
assertThat(retryOnRetryEvent.getNumberOfRetryAttempts()).isEqualTo(2);
assertThat(retryOnRetryEvent.getEventType()).isEqualTo(Type.RETRY);
assertThat(retryOnRetryEvent.getLastThrowable()).isInstanceOf(IOException.class);
assertThat(retryOnRetryEvent.getWaitInterval()).isEqualTo(Duration.ofMillis(1234L));
assertThat(retryOnRetryEvent.toString()).contains(
"Retry 'test', waiting PT1.234S until attempt '2'. Last attempt failed with exception 'java.io.IOException: Bla'.");
}
@Test
public void testRetryOnRetryEventWithNullLastThrowable() {
RetryOnRetryEvent retryOnRetryEvent = new RetryOnRetryEvent("test", 2, null, 500L);
assertThat(retryOnRetryEvent.getLastThrowable()).isNull();
assertThat(retryOnRetryEvent.toString()).contains(
"Retry 'test', waiting PT0.5S until attempt '2'. Last attempt failed with exception 'null'.");
}
}
| RetryEventTest |
java | spring-projects__spring-boot | module/spring-boot-flyway/src/test/java/org/springframework/boot/flyway/autoconfigure/FlywayAutoConfigurationTests.java | {
"start": 47630,
"end": 47976
} | class ____ extends AbstractUserH2DataSourceConfiguration {
@Override
protected String getDatabaseName(DataSourceProperties properties) {
String result = properties.determineDatabaseName();
assertThat(result).isNotNull();
return result;
}
}
@Configuration(proxyBeanMethods = false)
static | PropertiesBackedH2DataSourceConfiguration |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/DuplicateMapKeysTest.java | {
"start": 1736,
"end": 1965
} | class ____ {
public static void test() {
Map<String, String> map =
Map.ofEntries(
entry("Foo", "Bar"), entry("Ping", "Pong"), entry("Kit", "Kat"), entry("Food", "Bar"));
}
}
""")
.doTest();
}
}
| A |
java | micronaut-projects__micronaut-core | http-client/src/main/java/io/micronaut/http/client/netty/Pool40.java | {
"start": 19406,
"end": 19742
} | class ____ {
final EventLoop eventLoop;
final ResizerConnection connection;
private PoolEntry(EventLoop eventLoop, ResizerConnection connection) {
this.eventLoop = eventLoop;
this.connection = connection;
}
abstract boolean tryEarmarkForRequest();
}
final | PoolEntry |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/internal/EntityEntryImpl.java | {
"start": 21135,
"end": 21569
} | enum ____ " + enumType.getName()
+ " in compressed state as it has too many values." );
}
this.offset = offset;
this.enumConstants = enumConstants;
// a mask for reading the four bits, starting at the right offset
this.mask = 0xF << offset;
// a mask for setting the four bits at the right offset to 0
this.unsetMask = 0xFFFF & ~mask;
}
/**
* Returns the numeric value to be stored for the given | type |
java | netty__netty | codec-marshalling/src/main/java/io/netty/handler/codec/marshalling/UnmarshallerProvider.java | {
"start": 890,
"end": 1100
} | interface ____ {
/**
* Get the {@link Unmarshaller} for the given {@link ChannelHandlerContext}
*/
Unmarshaller getUnmarshaller(ChannelHandlerContext ctx) throws Exception;
}
| UnmarshallerProvider |
java | grpc__grpc-java | util/src/test/java/io/grpc/util/MultiChildLoadBalancerTest.java | {
"start": 16080,
"end": 17122
} | class ____ extends SubchannelPicker {
Map<Object, SubchannelPicker> childPickerMap;
Map<Object, ConnectivityState> childStates = new HashMap<>();
TestSubchannelPicker(
Map<Object, SubchannelPicker> childPickers, Map<Object, ConnectivityState> childStates) {
this.childPickerMap = childPickers;
this.childStates = childStates;
}
List<Subchannel> getReadySubchannels() {
List<Subchannel> readySubchannels = new ArrayList<>();
for ( Map.Entry<Object, ConnectivityState> cur : childStates.entrySet()) {
if (cur.getValue() == READY) {
Subchannel s = childPickerMap.get(cur.getKey()).pickSubchannel(null).getSubchannel();
readySubchannels.add(s);
}
}
return readySubchannels;
}
@Override
public PickResult pickSubchannel(PickSubchannelArgs args) {
return childPickerMap.values().iterator().next().pickSubchannel(args); // Always use the 1st
}
}
}
private | TestSubchannelPicker |
java | elastic__elasticsearch | libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/geoip/GeoIpProcessorFactoryBridge.java | {
"start": 747,
"end": 1214
} | class ____ extends ProcessorFactoryBridge.ProxyInternal {
public static GeoIpProcessorFactoryBridge create(final IpDatabaseProviderBridge bridgedIpDatabaseProvider) {
return new GeoIpProcessorFactoryBridge(bridgedIpDatabaseProvider);
}
GeoIpProcessorFactoryBridge(final IpDatabaseProviderBridge ipDatabaseProviderBridge) {
super(new GeoIpProcessor.Factory("geoip", ipDatabaseProviderBridge.toInternal()));
}
}
| GeoIpProcessorFactoryBridge |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/test/java/org/springframework/boot/devtools/livereload/LiveReloadServerTests.java | {
"start": 7147,
"end": 7670
} | class ____ extends Connection {
MonitoredConnection(java.net.Socket socket, InputStream inputStream, OutputStream outputStream)
throws IOException {
super(socket, inputStream, outputStream);
}
@Override
public void run() throws Exception {
try {
super.run();
}
catch (ConnectionClosedException ex) {
synchronized (MonitoredLiveReloadServer.this.monitor) {
MonitoredLiveReloadServer.this.closedExceptions.add(ex);
}
throw ex;
}
}
}
}
| MonitoredConnection |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/configuration/ImportResourceTests.java | {
"start": 5771,
"end": 6056
} | class ____ {
@Before("execution(* org.springframework.beans.testfixture.beans.TestBean.*(..))")
public void advice() { }
}
@Configuration
@ImportResource("classpath:org/springframework/context/annotation/configuration/ImportXmlWithConfigurationClass-context.xml")
static | AnAspect |
java | elastic__elasticsearch | modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsActionTests.java | {
"start": 2055,
"end": 7823
} | class ____ extends ESTestCase {
private final DataStreamLifecycleService dataStreamLifecycleService = mock(DataStreamLifecycleService.class);
private final DataStreamLifecycleErrorStore errorStore = mock(DataStreamLifecycleErrorStore.class);
private final TransportGetDataStreamLifecycleStatsAction action = new TransportGetDataStreamLifecycleStatsAction(
mock(TransportService.class),
mock(ClusterService.class),
mock(ThreadPool.class),
mock(ActionFilters.class),
dataStreamLifecycleService,
TestProjectResolvers.alwaysThrow()
);
private Long lastRunDuration;
private Long timeBetweenStarts;
@Before
public void setUp() throws Exception {
super.setUp();
lastRunDuration = randomBoolean() ? randomLongBetween(0, 100000) : null;
timeBetweenStarts = randomBoolean() ? randomLongBetween(0, 100000) : null;
when(dataStreamLifecycleService.getLastRunDuration()).thenReturn(lastRunDuration);
when(dataStreamLifecycleService.getTimeBetweenStarts()).thenReturn(timeBetweenStarts);
when(dataStreamLifecycleService.getErrorStore()).thenReturn(errorStore);
when(errorStore.getAllIndices(any())).thenReturn(Set.of());
}
public void testEmptyClusterState() {
GetDataStreamLifecycleStatsAction.Response response = action.collectStats(ProjectMetadata.builder(randomUniqueProjectId()).build());
assertThat(response.getRunDuration(), is(lastRunDuration));
assertThat(response.getTimeBetweenStarts(), is(timeBetweenStarts));
assertThat(response.getDataStreamStats().isEmpty(), is(true));
}
public void testMixedDataStreams() {
Set<String> indicesInError = new HashSet<>();
int numBackingIndices = 3;
ProjectMetadata.Builder builder = ProjectMetadata.builder(randomProjectIdOrDefault());
DataStream ilmDataStream = createDataStream(
builder,
"ilm-managed-index",
numBackingIndices,
Settings.builder()
.put(IndexMetadata.LIFECYCLE_NAME, "ILM_policy")
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()),
null,
Clock.systemUTC().millis()
);
builder.put(ilmDataStream);
DataStream dslDataStream = createDataStream(
builder,
"dsl-managed-index",
numBackingIndices,
settings(IndexVersion.current()),
DataStreamLifecycle.dataLifecycleBuilder().dataRetention(TimeValue.timeValueDays(10)).build(),
Clock.systemUTC().millis()
);
indicesInError.add(dslDataStream.getIndices().get(randomInt(numBackingIndices - 1)).getName());
builder.put(dslDataStream);
{
String dataStreamName = "mixed";
final List<Index> backingIndices = new ArrayList<>();
for (int k = 1; k <= 2; k++) {
IndexMetadata.Builder indexMetaBuilder = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, k))
.settings(
Settings.builder()
.put(IndexMetadata.LIFECYCLE_NAME, "ILM_policy")
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
)
.numberOfShards(1)
.numberOfReplicas(1)
.creationDate(Clock.systemUTC().millis());
IndexMetadata indexMetadata = indexMetaBuilder.build();
builder.put(indexMetadata, false);
backingIndices.add(indexMetadata.getIndex());
}
// DSL managed write index
IndexMetadata.Builder indexMetaBuilder = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 3))
.settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()))
.numberOfShards(1)
.numberOfReplicas(1)
.creationDate(Clock.systemUTC().millis());
MaxAgeCondition rolloverCondition = new MaxAgeCondition(TimeValue.timeValueMillis(Clock.systemUTC().millis() - 2000L));
indexMetaBuilder.putRolloverInfo(
new RolloverInfo(dataStreamName, List.of(rolloverCondition), Clock.systemUTC().millis() - 2000L)
);
IndexMetadata indexMetadata = indexMetaBuilder.build();
builder.put(indexMetadata, false);
backingIndices.add(indexMetadata.getIndex());
builder.put(newInstance(dataStreamName, backingIndices, 3, null, false, DataStreamLifecycle.dataLifecycleBuilder().build()));
}
ProjectMetadata project = builder.build();
when(errorStore.getAllIndices(project.id())).thenReturn(indicesInError);
GetDataStreamLifecycleStatsAction.Response response = action.collectStats(project);
assertThat(response.getRunDuration(), is(lastRunDuration));
assertThat(response.getTimeBetweenStarts(), is(timeBetweenStarts));
assertThat(response.getDataStreamStats().size(), is(2));
for (GetDataStreamLifecycleStatsAction.Response.DataStreamStats stats : response.getDataStreamStats()) {
if (stats.dataStreamName().equals("dsl-managed-index")) {
assertThat(stats.backingIndicesInTotal(), is(3));
assertThat(stats.backingIndicesInError(), is(1));
}
if (stats.dataStreamName().equals("mixed")) {
assertThat(stats.backingIndicesInTotal(), is(1));
assertThat(stats.backingIndicesInError(), is(0));
}
}
}
}
| TransportGetDataStreamLifecycleStatsActionTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/mapping/Property.java | {
"start": 17096,
"end": 18272
} | class ____ implements GeneratorCreationContext {
private final RuntimeModelCreationContext context;
public PropertyGeneratorCreationContext(RuntimeModelCreationContext context) {
this.context = context;
}
@Override
public Database getDatabase() {
return context.getMetadata().getDatabase();
}
@Override
public ServiceRegistry getServiceRegistry() {
return context.getBootstrapContext().getServiceRegistry();
}
@Override
public String getDefaultCatalog() {
return context.getSessionFactoryOptions().getDefaultCatalog();
}
@Override
public String getDefaultSchema() {
return context.getSessionFactoryOptions().getDefaultSchema();
}
@Override
public PersistentClass getPersistentClass() {
return persistentClass;
}
@Override
public RootClass getRootClass() {
return persistentClass.getRootClass();
}
@Override
public Property getProperty() {
return Property.this;
}
@Override
public Value getValue() {
return value;
}
@Override
public SqlStringGenerationContext getSqlStringGenerationContext() {
return context.getSqlStringGenerationContext();
}
}
}
| PropertyGeneratorCreationContext |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/io/support/SpringFactoriesLoader.java | {
"start": 13910,
"end": 16333
} | class ____.
* @param resourceLocation the resource location to look for factories
* @param classLoader the ClassLoader to use for loading resources;
* can be {@code null} to use the default
* @return a {@link SpringFactoriesLoader} instance
* @since 6.0
* @see #forResourceLocation(String)
*/
public static SpringFactoriesLoader forResourceLocation(String resourceLocation, @Nullable ClassLoader classLoader) {
Assert.hasText(resourceLocation, "'resourceLocation' must not be empty");
ClassLoader resourceClassLoader = (classLoader != null ? classLoader :
SpringFactoriesLoader.class.getClassLoader());
Map<String, Factories> factoriesCache = cache.computeIfAbsent(
resourceClassLoader, key -> new ConcurrentReferenceHashMap<>());
Factories factories = factoriesCache.computeIfAbsent(resourceLocation, key ->
new Factories(loadFactoriesResource(resourceClassLoader, resourceLocation)));
return new SpringFactoriesLoader(classLoader, factories.byType());
}
protected static Map<String, List<String>> loadFactoriesResource(ClassLoader classLoader, String resourceLocation) {
Map<String, List<String>> result = new LinkedHashMap<>();
try {
Enumeration<URL> urls = classLoader.getResources(resourceLocation);
while (urls.hasMoreElements()) {
UrlResource resource = new UrlResource(urls.nextElement());
Properties properties = PropertiesLoaderUtils.loadProperties(resource);
properties.forEach((name, value) -> {
String[] factoryImplementationNames = StringUtils.commaDelimitedListToStringArray((String) value);
List<String> implementations = result.computeIfAbsent(((String) name).trim(),
key -> new ArrayList<>(factoryImplementationNames.length));
Arrays.stream(factoryImplementationNames).map(String::trim).forEach(implementations::add);
});
}
result.replaceAll(SpringFactoriesLoader::toDistinctUnmodifiableList);
}
catch (IOException ex) {
throw new IllegalArgumentException("Unable to load factories from location [" + resourceLocation + "]", ex);
}
return Collections.unmodifiableMap(result);
}
private static List<String> toDistinctUnmodifiableList(String factoryType, List<String> implementations) {
return implementations.stream().distinct().toList();
}
/**
* Internal instantiator used to create the factory instance.
* @since 6.0
* @param <T> the instance implementation type
*/
static final | loader |
java | apache__camel | test-infra/camel-test-infra-cassandra/src/test/java/org/apache/camel/test/infra/cassandra/services/CassandraLocalContainerService.java | {
"start": 867,
"end": 988
} | class ____ extends CassandraLocalContainerInfraService
implements CassandraService {
}
| CassandraLocalContainerService |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/time/FastDateParser.java | {
"start": 8759,
"end": 11154
} | class ____ extends Strategy {
private final int field;
/**
* Constructs a Strategy that parses a Number field
*
* @param field The Calendar field
*/
NumberStrategy(final int field) {
this.field = field;
}
/**
* {@inheritDoc}
*/
@Override
boolean isNumber() {
return true;
}
/**
* Make any modifications to parsed integer
*
* @param parser The parser
* @param iValue The parsed integer
* @return The modified value
*/
int modify(final FastDateParser parser, final int iValue) {
return iValue;
}
@Override
boolean parse(final FastDateParser parser, final Calendar calendar, final String source, final ParsePosition pos, final int maxWidth) {
int idx = pos.getIndex();
int last = source.length();
if (maxWidth == 0) {
// if no maxWidth, strip leading white space
for (; idx < last; ++idx) {
final char c = source.charAt(idx);
if (!Character.isWhitespace(c)) {
break;
}
}
pos.setIndex(idx);
} else {
final int end = idx + maxWidth;
if (last > end) {
last = end;
}
}
for (; idx < last; ++idx) {
final char c = source.charAt(idx);
if (!Character.isDigit(c)) {
break;
}
}
if (pos.getIndex() == idx) {
pos.setErrorIndex(idx);
return false;
}
final int value = Integer.parseInt(source.substring(pos.getIndex(), idx));
pos.setIndex(idx);
calendar.set(field, modify(parser, value));
return true;
}
/**
* Converts this instance to a handy debug string.
*
* @since 3.12.0
*/
@Override
public String toString() {
return "NumberStrategy [field=" + field + "]";
}
}
/**
* A strategy to parse a single field from the parsing pattern
*/
private abstract static | NumberStrategy |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/models/annotations/spi/CommonTableDetails.java | {
"start": 250,
"end": 473
} | interface ____ extends DatabaseObjectDetails, UniqueConstraintCollector, IndexCollector {
/**
* The table name
*/
String name();
/**
* Setter for {@linkplain #name()}
*/
void name(String name);
}
| CommonTableDetails |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationAttemptReportRequestPBImpl.java | {
"start": 1591,
"end": 4440
} | class ____ extends
GetApplicationAttemptReportRequest {
GetApplicationAttemptReportRequestProto proto =
GetApplicationAttemptReportRequestProto.getDefaultInstance();
GetApplicationAttemptReportRequestProto.Builder builder = null;
boolean viaProto = false;
private ApplicationAttemptId applicationAttemptId = null;
public GetApplicationAttemptReportRequestPBImpl() {
builder = GetApplicationAttemptReportRequestProto.newBuilder();
}
public GetApplicationAttemptReportRequestPBImpl(
GetApplicationAttemptReportRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public GetApplicationAttemptReportRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null) {
return false;
}
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void mergeLocalToBuilder() {
if (applicationAttemptId != null) {
builder
.setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId));
}
}
private void mergeLocalToProto() {
if (viaProto) {
maybeInitBuilder();
}
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetApplicationAttemptReportRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public ApplicationAttemptId getApplicationAttemptId() {
if (this.applicationAttemptId != null) {
return this.applicationAttemptId;
}
GetApplicationAttemptReportRequestProtoOrBuilder p =
viaProto ? proto : builder;
if (!p.hasApplicationAttemptId()) {
return null;
}
this.applicationAttemptId =
convertFromProtoFormat(p.getApplicationAttemptId());
return this.applicationAttemptId;
}
@Override
public void
setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
maybeInitBuilder();
if (applicationAttemptId == null) {
builder.clearApplicationAttemptId();
}
this.applicationAttemptId = applicationAttemptId;
}
private ApplicationAttemptIdPBImpl convertFromProtoFormat(
ApplicationAttemptIdProto p) {
return new ApplicationAttemptIdPBImpl(p);
}
private ApplicationAttemptIdProto
convertToProtoFormat(ApplicationAttemptId t) {
return ((ApplicationAttemptIdPBImpl) t).getProto();
}
}
| GetApplicationAttemptReportRequestPBImpl |
java | grpc__grpc-java | api/src/main/java/io/grpc/InternalChannelz.java | {
"start": 23407,
"end": 25662
} | class ____ {
public final int state;
public final int caState;
public final int retransmits;
public final int probes;
public final int backoff;
public final int options;
public final int sndWscale;
public final int rcvWscale;
public final int rto;
public final int ato;
public final int sndMss;
public final int rcvMss;
public final int unacked;
public final int sacked;
public final int lost;
public final int retrans;
public final int fackets;
public final int lastDataSent;
public final int lastAckSent;
public final int lastDataRecv;
public final int lastAckRecv;
public final int pmtu;
public final int rcvSsthresh;
public final int rtt;
public final int rttvar;
public final int sndSsthresh;
public final int sndCwnd;
public final int advmss;
public final int reordering;
TcpInfo(int state, int caState, int retransmits, int probes, int backoff, int options,
int sndWscale, int rcvWscale, int rto, int ato, int sndMss, int rcvMss, int unacked,
int sacked, int lost, int retrans, int fackets, int lastDataSent, int lastAckSent,
int lastDataRecv, int lastAckRecv, int pmtu, int rcvSsthresh, int rtt, int rttvar,
int sndSsthresh, int sndCwnd, int advmss, int reordering) {
this.state = state;
this.caState = caState;
this.retransmits = retransmits;
this.probes = probes;
this.backoff = backoff;
this.options = options;
this.sndWscale = sndWscale;
this.rcvWscale = rcvWscale;
this.rto = rto;
this.ato = ato;
this.sndMss = sndMss;
this.rcvMss = rcvMss;
this.unacked = unacked;
this.sacked = sacked;
this.lost = lost;
this.retrans = retrans;
this.fackets = fackets;
this.lastDataSent = lastDataSent;
this.lastAckSent = lastAckSent;
this.lastDataRecv = lastDataRecv;
this.lastAckRecv = lastAckRecv;
this.pmtu = pmtu;
this.rcvSsthresh = rcvSsthresh;
this.rtt = rtt;
this.rttvar = rttvar;
this.sndSsthresh = sndSsthresh;
this.sndCwnd = sndCwnd;
this.advmss = advmss;
this.reordering = reordering;
}
public static final | TcpInfo |
java | google__guava | guava/src/com/google/common/collect/Maps.java | {
"start": 7344,
"end": 26848
} | enum ____ order, not encounter order.
*
* <p>If the mapped keys contain duplicates, the values are merged using the specified merging
* function.
*
* @since 21.0
*/
public static <T extends @Nullable Object, K extends Enum<K>, V>
Collector<T, ?, ImmutableMap<K, V>> toImmutableEnumMap(
java.util.function.Function<? super T, ? extends K> keyFunction,
java.util.function.Function<? super T, ? extends V> valueFunction,
BinaryOperator<V> mergeFunction) {
return CollectCollectors.toImmutableEnumMap(keyFunction, valueFunction, mergeFunction);
}
/**
* Creates a <i>mutable</i>, empty {@code HashMap} instance.
*
* <p><b>Note:</b> if mutability is not required, use {@link ImmutableMap#of()} instead.
*
* <p><b>Note:</b> if {@code K} is an {@code enum} type, use {@link #newEnumMap} instead.
*
* <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
* use the {@code HashMap} constructor directly, taking advantage of <a
* href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
* syntax</a>.
*
* @return a new, empty {@code HashMap}
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <K extends @Nullable Object, V extends @Nullable Object>
HashMap<K, V> newHashMap() {
return new HashMap<>();
}
/**
* Creates a <i>mutable</i> {@code HashMap} instance with the same mappings as the specified map.
*
* <p><b>Note:</b> if mutability is not required, use {@link ImmutableMap#copyOf(Map)} instead.
*
* <p><b>Note:</b> if {@code K} is an {@link Enum} type, use {@link #newEnumMap} instead.
*
* <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
* use the {@code HashMap} constructor directly, taking advantage of <a
* href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
* syntax</a>.
*
* @param map the mappings to be placed in the new map
* @return a new {@code HashMap} initialized with the mappings from {@code map}
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <K extends @Nullable Object, V extends @Nullable Object> HashMap<K, V> newHashMap(
Map<? extends K, ? extends V> map) {
return new HashMap<>(map);
}
/**
* Creates a {@code HashMap} instance, with a high enough "initial capacity" that it <i>should</i>
* hold {@code expectedSize} elements without growth. This behavior cannot be broadly guaranteed,
* but it is observed to be true for OpenJDK 1.7. It also can't be guaranteed that the method
* isn't inadvertently <i>oversizing</i> the returned map.
*
* @param expectedSize the number of entries you expect to add to the returned map
* @return a new, empty {@code HashMap} with enough capacity to hold {@code expectedSize} entries
* without resizing
* @throws IllegalArgumentException if {@code expectedSize} is negative
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <K extends @Nullable Object, V extends @Nullable Object>
HashMap<K, V> newHashMapWithExpectedSize(int expectedSize) {
return new HashMap<>(capacity(expectedSize));
}
/**
* Returns a capacity that is sufficient to keep the map from being resized as long as it grows no
* larger than expectedSize and the load factor is ≥ its default (0.75).
*/
static int capacity(int expectedSize) {
if (expectedSize < 3) {
checkNonnegative(expectedSize, "expectedSize");
return expectedSize + 1;
}
if (expectedSize < Ints.MAX_POWER_OF_TWO) {
// This seems to be consistent across JDKs. The capacity argument to HashMap and LinkedHashMap
// ends up being used to compute a "threshold" size, beyond which the internal table
// will be resized. That threshold is ceilingPowerOfTwo(capacity*loadFactor), where
// loadFactor is 0.75 by default. So with the calculation here we ensure that the
// threshold is equal to ceilingPowerOfTwo(expectedSize). There is a separate code
// path when the first operation on the new map is putAll(otherMap). There, prior to
// https://github.com/openjdk/jdk/commit/3e393047e12147a81e2899784b943923fc34da8e, a bug
// meant that sometimes a too-large threshold is calculated. However, this new threshold is
// independent of the initial capacity, except that it won't be lower than the threshold
// computed from that capacity. Because the internal table is only allocated on the first
// write, we won't see copying because of the new threshold. So it is always OK to use the
// calculation here.
return (int) ceil(expectedSize / 0.75);
}
return Integer.MAX_VALUE; // any large value
}
/**
* Creates a <i>mutable</i>, empty, insertion-ordered {@code LinkedHashMap} instance.
*
* <p><b>Note:</b> if mutability is not required, use {@link ImmutableMap#of()} instead.
*
* <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
* use the {@code LinkedHashMap} constructor directly, taking advantage of <a
* href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
* syntax</a>.
*
* @return a new, empty {@code LinkedHashMap}
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <K extends @Nullable Object, V extends @Nullable Object>
LinkedHashMap<K, V> newLinkedHashMap() {
return new LinkedHashMap<>();
}
/**
* Creates a <i>mutable</i>, insertion-ordered {@code LinkedHashMap} instance with the same
* mappings as the specified map.
*
* <p><b>Note:</b> if mutability is not required, use {@link ImmutableMap#copyOf(Map)} instead.
*
* <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
* use the {@code LinkedHashMap} constructor directly, taking advantage of <a
* href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
* syntax</a>.
*
* @param map the mappings to be placed in the new map
* @return a new, {@code LinkedHashMap} initialized with the mappings from {@code map}
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <K extends @Nullable Object, V extends @Nullable Object>
LinkedHashMap<K, V> newLinkedHashMap(Map<? extends K, ? extends V> map) {
return new LinkedHashMap<>(map);
}
/**
* Creates a {@code LinkedHashMap} instance, with a high enough "initial capacity" that it
* <i>should</i> hold {@code expectedSize} elements without growth. This behavior cannot be
* broadly guaranteed, but it is observed to be true for OpenJDK 1.7. It also can't be guaranteed
* that the method isn't inadvertently <i>oversizing</i> the returned map.
*
* @param expectedSize the number of entries you expect to add to the returned map
* @return a new, empty {@code LinkedHashMap} with enough capacity to hold {@code expectedSize}
* entries without resizing
* @throws IllegalArgumentException if {@code expectedSize} is negative
* @since 19.0
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <K extends @Nullable Object, V extends @Nullable Object>
LinkedHashMap<K, V> newLinkedHashMapWithExpectedSize(int expectedSize) {
return new LinkedHashMap<>(capacity(expectedSize));
}
/**
* Creates a new empty {@link ConcurrentHashMap} instance.
*
* @since 3.0
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentMap() {
return new ConcurrentHashMap<>();
}
/**
* Creates a <i>mutable</i>, empty {@code TreeMap} instance using the natural ordering of its
* elements.
*
* <p><b>Note:</b> if mutability is not required, use {@link ImmutableSortedMap#of()} instead.
*
* <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
* use the {@code TreeMap} constructor directly, taking advantage of <a
* href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
* syntax</a>.
*
* @return a new, empty {@code TreeMap}
*/
@SuppressWarnings({
"rawtypes", // https://github.com/google/guava/issues/989
"NonApiType", // acts as a direct substitute for a constructor call
})
public static <K extends Comparable, V extends @Nullable Object> TreeMap<K, V> newTreeMap() {
return new TreeMap<>();
}
/**
* Creates a <i>mutable</i> {@code TreeMap} instance with the same mappings as the specified map
* and using the same ordering as the specified map.
*
* <p><b>Note:</b> if mutability is not required, use {@link
* ImmutableSortedMap#copyOfSorted(SortedMap)} instead.
*
* <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
* use the {@code TreeMap} constructor directly, taking advantage of <a
* href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
* syntax</a>.
*
* @param map the sorted map whose mappings are to be placed in the new map and whose comparator
* is to be used to sort the new map
* @return a new {@code TreeMap} initialized with the mappings from {@code map} and using the
* comparator of {@code map}
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <K extends @Nullable Object, V extends @Nullable Object> TreeMap<K, V> newTreeMap(
SortedMap<K, ? extends V> map) {
return new TreeMap<>(map);
}
/**
* Creates a <i>mutable</i>, empty {@code TreeMap} instance using the given comparator.
*
* <p><b>Note:</b> if mutability is not required, use {@code
* ImmutableSortedMap.orderedBy(comparator).build()} instead.
*
* <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
* use the {@code TreeMap} constructor directly, taking advantage of <a
* href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
* syntax</a>.
*
* @param comparator the comparator to sort the keys with
* @return a new, empty {@code TreeMap}
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <C extends @Nullable Object, K extends C, V extends @Nullable Object>
TreeMap<K, V> newTreeMap(@Nullable Comparator<C> comparator) {
// Ideally, the extra type parameter "C" shouldn't be necessary. It is a
// work-around of a compiler type inference quirk that prevents the
// following code from being compiled:
// Comparator<Class<?>> comparator = null;
// Map<Class<? extends Throwable>, String> map = newTreeMap(comparator);
return new TreeMap<>(comparator);
}
/**
* Creates an {@code EnumMap} instance.
*
* @param type the key type for this map
* @return a new, empty {@code EnumMap}
*/
public static <K extends Enum<K>, V extends @Nullable Object> EnumMap<K, V> newEnumMap(
Class<K> type) {
return new EnumMap<>(checkNotNull(type));
}
/**
* Creates an {@code EnumMap} with the same mappings as the specified map.
*
* <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
* use the {@code EnumMap} constructor directly, taking advantage of <a
* href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
* syntax</a>.
*
* @param map the map from which to initialize this {@code EnumMap}
* @return a new {@code EnumMap} initialized with the mappings from {@code map}
* @throws IllegalArgumentException if {@code m} is not an {@code EnumMap} instance and contains
* no mappings
*/
public static <K extends Enum<K>, V extends @Nullable Object> EnumMap<K, V> newEnumMap(
Map<K, ? extends V> map) {
return new EnumMap<>(map);
}
/**
* Creates an {@code IdentityHashMap} instance.
*
* <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
* use the {@code IdentityHashMap} constructor directly, taking advantage of <a
* href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
* syntax</a>.
*
* @return a new, empty {@code IdentityHashMap}
*/
public static <K extends @Nullable Object, V extends @Nullable Object>
IdentityHashMap<K, V> newIdentityHashMap() {
return new IdentityHashMap<>();
}
/**
* Computes the difference between two maps. This difference is an immutable snapshot of the state
* of the maps at the time this method is called. It will never change, even if the maps change at
* a later time.
*
* <p>Since this method uses {@code HashMap} instances internally, the keys of the supplied maps
* must be well-behaved with respect to {@link Object#equals} and {@link Object#hashCode}.
*
* <p><b>Note:</b>If you only need to know whether two maps have the same mappings, call {@code
* left.equals(right)} instead of this method.
*
* @param left the map to treat as the "left" map for purposes of comparison
* @param right the map to treat as the "right" map for purposes of comparison
* @return the difference between the two maps
*/
public static <K extends @Nullable Object, V extends @Nullable Object>
MapDifference<K, V> difference(
Map<? extends K, ? extends V> left, Map<? extends K, ? extends V> right) {
if (left instanceof SortedMap) {
@SuppressWarnings("unchecked")
SortedMap<K, ? extends V> sortedLeft = (SortedMap<K, ? extends V>) left;
return difference(sortedLeft, right);
}
return difference(left, right, Equivalence.equals());
}
/**
* Computes the difference between two maps. This difference is an immutable snapshot of the state
* of the maps at the time this method is called. It will never change, even if the maps change at
* a later time.
*
* <p>Since this method uses {@code HashMap} instances internally, the keys of the supplied maps
* must be well-behaved with respect to {@link Object#equals} and {@link Object#hashCode}.
*
* @param left the map to treat as the "left" map for purposes of comparison
* @param right the map to treat as the "right" map for purposes of comparison
* @param valueEquivalence the equivalence relationship to use to compare values
* @return the difference between the two maps
* @since 10.0
*/
public static <K extends @Nullable Object, V extends @Nullable Object>
MapDifference<K, V> difference(
Map<? extends K, ? extends V> left,
Map<? extends K, ? extends V> right,
Equivalence<? super @NonNull V> valueEquivalence) {
Preconditions.checkNotNull(valueEquivalence);
Map<K, V> onlyOnLeft = new LinkedHashMap<>();
Map<K, V> onlyOnRight = new LinkedHashMap<>(right); // will whittle it down
Map<K, V> onBoth = new LinkedHashMap<>();
Map<K, ValueDifference<V>> differences = new LinkedHashMap<>();
doDifference(left, right, valueEquivalence, onlyOnLeft, onlyOnRight, onBoth, differences);
return new MapDifferenceImpl<>(onlyOnLeft, onlyOnRight, onBoth, differences);
}
/**
* Computes the difference between two sorted maps, using the comparator of the left map, or
* {@code Ordering.natural()} if the left map uses the natural ordering of its elements. This
* difference is an immutable snapshot of the state of the maps at the time this method is called.
* It will never change, even if the maps change at a later time.
*
* <p>Since this method uses {@code TreeMap} instances internally, the keys of the right map must
* all compare as distinct according to the comparator of the left map.
*
* <p><b>Note:</b>If you only need to know whether two sorted maps have the same mappings, call
* {@code left.equals(right)} instead of this method.
*
* @param left the map to treat as the "left" map for purposes of comparison
* @param right the map to treat as the "right" map for purposes of comparison
* @return the difference between the two maps
* @since 11.0
*/
public static <K extends @Nullable Object, V extends @Nullable Object>
SortedMapDifference<K, V> difference(
SortedMap<K, ? extends V> left, Map<? extends K, ? extends V> right) {
checkNotNull(left);
checkNotNull(right);
Comparator<? super K> comparator = orNaturalOrder(left.comparator());
SortedMap<K, V> onlyOnLeft = newTreeMap(comparator);
SortedMap<K, V> onlyOnRight = newTreeMap(comparator);
onlyOnRight.putAll(right); // will whittle it down
SortedMap<K, V> onBoth = newTreeMap(comparator);
SortedMap<K, ValueDifference<V>> differences = newTreeMap(comparator);
doDifference(left, right, Equivalence.equals(), onlyOnLeft, onlyOnRight, onBoth, differences);
return new SortedMapDifferenceImpl<>(onlyOnLeft, onlyOnRight, onBoth, differences);
}
private static <K extends @Nullable Object, V extends @Nullable Object> void doDifference(
Map<? extends K, ? extends V> left,
Map<? extends K, ? extends V> right,
Equivalence<? super @NonNull V> valueEquivalence,
Map<K, V> onlyOnLeft,
Map<K, V> onlyOnRight,
Map<K, V> onBoth,
Map<K, ValueDifference<V>> differences) {
for (Entry<? extends K, ? extends V> entry : left.entrySet()) {
K leftKey = entry.getKey();
V leftValue = entry.getValue();
if (right.containsKey(leftKey)) {
/*
* The cast is safe because onlyOnRight contains all the keys of right.
*
* TODO(cpovirk): Consider checking onlyOnRight.containsKey instead of right.containsKey.
* That could change behavior if the input maps use different equivalence relations (and so
* a key that appears once in `right` might appear multiple times in `left`). We don't
* guarantee behavior in that case, anyway, and the current behavior is likely undesirable.
* So that's either a reason to feel free to change it or a reason to not bother thinking
* further about this.
*/
V rightValue = uncheckedCastNullableTToT(onlyOnRight.remove(leftKey));
if (valueEquivalence.equivalent(leftValue, rightValue)) {
onBoth.put(leftKey, leftValue);
} else {
differences.put(leftKey, ValueDifferenceImpl.create(leftValue, rightValue));
}
} else {
onlyOnLeft.put(leftKey, leftValue);
}
}
}
private static <K extends @Nullable Object, V extends @Nullable Object> Map<K, V> unmodifiableMap(
Map<K, ? extends V> map) {
if (map instanceof SortedMap) {
return Collections.unmodifiableSortedMap((SortedMap<K, ? extends V>) map);
} else {
return Collections.unmodifiableMap(map);
}
}
private static | definition |
java | grpc__grpc-java | services/src/main/java/io/grpc/protobuf/services/BinaryLogProvider.java | {
"start": 1511,
"end": 4360
} | class ____ extends BinaryLog {
@VisibleForTesting
public static final Marshaller<byte[]> BYTEARRAY_MARSHALLER = new ByteArrayMarshaller();
private final ClientInterceptor binaryLogShim = new BinaryLogShim();
/**
* Wraps a channel to provide binary logging on {@link ClientCall}s as needed.
*/
@Override
public final Channel wrapChannel(Channel channel) {
return ClientInterceptors.intercept(channel, binaryLogShim);
}
private static MethodDescriptor<byte[], byte[]> toByteBufferMethod(
MethodDescriptor<?, ?> method) {
return method.toBuilder(BYTEARRAY_MARSHALLER, BYTEARRAY_MARSHALLER).build();
}
/**
* Wraps a {@link ServerMethodDefinition} such that it performs binary logging if needed.
*/
@Override
public final <ReqT, RespT> ServerMethodDefinition<?, ?> wrapMethodDefinition(
ServerMethodDefinition<ReqT, RespT> oMethodDef) {
ServerInterceptor binlogInterceptor =
getServerInterceptor(oMethodDef.getMethodDescriptor().getFullMethodName());
if (binlogInterceptor == null) {
return oMethodDef;
}
MethodDescriptor<byte[], byte[]> binMethod =
BinaryLogProvider.toByteBufferMethod(oMethodDef.getMethodDescriptor());
ServerMethodDefinition<byte[], byte[]> binDef =
InternalServerInterceptors.wrapMethod(oMethodDef, binMethod);
ServerCallHandler<byte[], byte[]> binlogHandler =
InternalServerInterceptors.interceptCallHandlerCreate(
binlogInterceptor, binDef.getServerCallHandler());
return ServerMethodDefinition.create(binMethod, binlogHandler);
}
/**
* Returns a {@link ServerInterceptor} for binary logging. gRPC is free to cache the interceptor,
* so the interceptor must be reusable across calls. At runtime, the request and response
* marshallers are always {@code Marshaller<InputStream>}.
* Returns {@code null} if this method is not binary logged.
*/
// TODO(zpencer): ensure the interceptor properly handles retries and hedging
@Nullable
protected abstract ServerInterceptor getServerInterceptor(String fullMethodName);
/**
* Returns a {@link ClientInterceptor} for binary logging. gRPC is free to cache the interceptor,
* so the interceptor must be reusable across calls. At runtime, the request and response
* marshallers are always {@code Marshaller<InputStream>}.
* Returns {@code null} if this method is not binary logged.
*/
// TODO(zpencer): ensure the interceptor properly handles retries and hedging
@Nullable
protected abstract ClientInterceptor getClientInterceptor(
String fullMethodName, CallOptions callOptions);
@Override
public void close() throws IOException {
// default impl: noop
// TODO(zpencer): make BinaryLogProvider provide a BinaryLog, and this method belongs there
}
// Creating a named | BinaryLogProvider |
java | google__guice | core/test/com/google/inject/TypeConversionTest.java | {
"start": 10911,
"end": 11280
} | class ____ extends AbstractModule {
private final Module converterModule;
InnerModule(Module converterModule) {
this.converterModule = converterModule;
}
@Override
protected void configure() {
install(converterModule);
bindConstant().annotatedWith(NumericValue.class).to("foo");
bind(DateHolder.class);
}
}
| InnerModule |
java | apache__camel | components/camel-test/camel-test-spring-junit5/src/main/java/org/apache/camel/test/spring/junit5/CamelSpringTestContextLoader.java | {
"start": 2107,
"end": 2961
} | class ____ extends AbstractContextLoader {
private static final Logger LOG = LoggerFactory.getLogger(CamelSpringTestContextLoader.class);
/**
* Modeled after the Spring implementation in {@link AbstractGenericContextLoader}, this method creates and
* refreshes the application context while providing for processing of additional Camel specific post-refresh
* actions. We do not provide the pre-post hooks for customization seen in {@link AbstractGenericContextLoader}
* because they probably are unnecessary for 90+% of users.
* <p/>
* For some functionality, we cannot use {@link org.springframework.test.context.TestExecutionListener} because we
* need to both produce the desired outcome during application context loading, and also cleanup after ourselves
* even if the test | CamelSpringTestContextLoader |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/support/SystemEnvironmentPropertySourceEnvironmentPostProcessor.java | {
"start": 1746,
"end": 4281
} | class ____ implements EnvironmentPostProcessor, Ordered {
/**
* The default order for the processor.
*/
public static final int DEFAULT_ORDER = SpringApplicationJsonEnvironmentPostProcessor.DEFAULT_ORDER - 1;
private int order = DEFAULT_ORDER;
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
postProcessEnvironment(environment, application.getEnvironmentPrefix());
}
private void postProcessEnvironment(ConfigurableEnvironment environment, @Nullable String environmentPrefix) {
String sourceName = StandardEnvironment.SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME;
PropertySource<?> propertySource = environment.getPropertySources().get(sourceName);
if (propertySource != null) {
replacePropertySource(environment, sourceName, propertySource, environmentPrefix);
}
}
@SuppressWarnings("unchecked")
private void replacePropertySource(ConfigurableEnvironment environment, String sourceName,
PropertySource<?> propertySource, @Nullable String environmentPrefix) {
Map<String, Object> originalSource = (Map<String, Object>) propertySource.getSource();
SystemEnvironmentPropertySource source = new OriginAwareSystemEnvironmentPropertySource(sourceName,
originalSource, environmentPrefix);
environment.getPropertySources().replace(sourceName, source);
}
@Override
public int getOrder() {
return this.order;
}
public void setOrder(int order) {
this.order = order;
}
/**
* Post-process the given {@link ConfigurableEnvironment} by copying appropriate
* settings from a parent {@link ConfigurableEnvironment}.
* @param environment the environment to post-process
* @param parentEnvironment the parent environment
* @since 3.4.12
*/
public static void postProcessEnvironment(ConfigurableEnvironment environment,
ConfigurableEnvironment parentEnvironment) {
PropertySource<?> parentSystemEnvironmentPropertySource = parentEnvironment.getPropertySources()
.get(StandardEnvironment.SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME);
if (parentSystemEnvironmentPropertySource instanceof OriginAwareSystemEnvironmentPropertySource parentOriginAwareSystemEnvironmentPropertySource) {
new SystemEnvironmentPropertySourceEnvironmentPostProcessor().postProcessEnvironment(environment,
parentOriginAwareSystemEnvironmentPropertySource.getPrefix());
}
}
/**
* {@link SystemEnvironmentPropertySource} that also tracks {@link Origin}.
*/
protected static | SystemEnvironmentPropertySourceEnvironmentPostProcessor |
java | quarkusio__quarkus | extensions/cache/runtime/src/main/java/io/quarkus/cache/runtime/CacheInterceptionContext.java | {
"start": 196,
"end": 949
} | class ____<T> {
private final List<T> interceptorBindings;
private final List<Short> cacheKeyParameterPositions;
public CacheInterceptionContext(List<T> interceptorBindings, List<Short> cacheKeyParameterPositions) {
Objects.requireNonNull(interceptorBindings);
Objects.requireNonNull(cacheKeyParameterPositions);
this.interceptorBindings = Collections.unmodifiableList(interceptorBindings);
this.cacheKeyParameterPositions = Collections.unmodifiableList(cacheKeyParameterPositions);
}
public List<T> getInterceptorBindings() {
return interceptorBindings;
}
public List<Short> getCacheKeyParameterPositions() {
return cacheKeyParameterPositions;
}
}
| CacheInterceptionContext |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/MapKeyCompositeTypeAnnotation.java | {
"start": 477,
"end": 1635
} | class ____ implements MapKeyCompositeType {
private java.lang.Class<? extends org.hibernate.usertype.CompositeUserType<?>> value;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public MapKeyCompositeTypeAnnotation(ModelsContext modelContext) {
}
/**
* Used in creating annotation instances from JDK variant
*/
public MapKeyCompositeTypeAnnotation(MapKeyCompositeType annotation, ModelsContext modelContext) {
this.value = annotation.value();
}
/**
* Used in creating annotation instances from Jandex variant
*/
public MapKeyCompositeTypeAnnotation(Map<String, Object> attributeValues, ModelsContext modelContext) {
this.value = (Class<? extends org.hibernate.usertype.CompositeUserType<?>>) attributeValues.get( "value" );
}
@Override
public Class<? extends Annotation> annotationType() {
return MapKeyCompositeType.class;
}
@Override
public java.lang.Class<? extends org.hibernate.usertype.CompositeUserType<?>> value() {
return value;
}
public void value(java.lang.Class<? extends org.hibernate.usertype.CompositeUserType<?>> value) {
this.value = value;
}
}
| MapKeyCompositeTypeAnnotation |
java | spring-projects__spring-framework | spring-r2dbc/src/test/java/org/springframework/r2dbc/core/AbstractDatabaseClientIntegrationTests.java | {
"start": 6708,
"end": 10115
} | class ____ {
@Test // gh-34768
void executeInsertWithReusedNamedParameter() {
DatabaseClient databaseClient = DatabaseClient.create(connectionFactory);
Lego lego = new Lego(1, 42, "Star Wars", 42);
// ":number" is reused.
databaseClient.sql("INSERT INTO legoset (id, version, name, manual) VALUES(:id, :number, :name, :number)")
.bind("id", lego.id)
.bind("name", lego.name)
.bind("number", lego.version)
.fetch().rowsUpdated()
.as(StepVerifier::create)
.expectNext(1L)
.verifyComplete();
databaseClient.sql("SELECT * FROM legoset")
.mapProperties(Lego.class)
.first()
.as(StepVerifier::create)
.assertNext(actual -> assertThat(actual).isEqualTo(lego))
.verifyComplete();
}
@Test // gh-34768
void executeSelectWithReusedNamedParameterList() {
DatabaseClient databaseClient = DatabaseClient.create(connectionFactory);
String insertSql = "INSERT INTO legoset (id, version, name, manual) VALUES(:id, :version, :name, :manual)";
// ":numbers" is reused.
String selectSql = "SELECT * FROM legoset WHERE version IN (:numbers) OR manual IN (:numbers)";
Lego lego = new Lego(1, 42, "Star Wars", 99);
databaseClient.sql(insertSql)
.bind("id", lego.id)
.bind("version", lego.version)
.bind("name", lego.name)
.bind("manual", lego.manual)
.fetch().rowsUpdated()
.as(StepVerifier::create)
.expectNext(1L)
.verifyComplete();
databaseClient.sql(selectSql)
// match version
.bind("numbers", List.of(2, 3, lego.version, 4))
.mapProperties(Lego.class)
.first()
.as(StepVerifier::create)
.assertNext(actual -> assertThat(actual).isEqualTo(lego))
.verifyComplete();
databaseClient.sql(selectSql)
// match manual
.bind("numbers", List.of(2, 3, lego.manual, 4))
.mapProperties(Lego.class)
.first()
.as(StepVerifier::create)
.assertNext(actual -> assertThat(actual).isEqualTo(lego))
.verifyComplete();
}
@Test // gh-34768
void executeSelectWithReusedNamedParameterListFromBeanProperties() {
DatabaseClient databaseClient = DatabaseClient.create(connectionFactory);
String insertSql = "INSERT INTO legoset (id, version, name, manual) VALUES(:id, :version, :name, :manual)";
// ":numbers" is reused.
String selectSql = "SELECT * FROM legoset WHERE version IN (:numbers) OR manual IN (:numbers)";
Lego lego = new Lego(1, 42, "Star Wars", 99);
databaseClient.sql(insertSql)
.bind("id", lego.id)
.bind("version", lego.version)
.bind("name", lego.name)
.bind("manual", lego.manual)
.fetch().rowsUpdated()
.as(StepVerifier::create)
.expectNext(1L)
.verifyComplete();
databaseClient.sql(selectSql)
// match version
.bindProperties(new LegoRequest(List.of(lego.version)))
.mapProperties(Lego.class)
.first()
.as(StepVerifier::create)
.assertNext(actual -> assertThat(actual).isEqualTo(lego))
.verifyComplete();
databaseClient.sql(selectSql)
// match manual
.bindProperties(new LegoRequest(List.of(lego.manual)))
.mapProperties(Lego.class)
.first()
.as(StepVerifier::create)
.assertNext(actual -> assertThat(actual).isEqualTo(lego))
.verifyComplete();
}
record Lego(int id, Integer version, String name, Integer manual) {
}
static | ReusedNamedParameterTests |
java | square__moshi | examples/src/main/java/com/squareup/moshi/recipes/CustomAdapterFactory.java | {
"start": 1085,
"end": 1569
} | class ____ {
public void run() throws Exception {
Moshi moshi = new Moshi.Builder().add(new SortedSetAdapterFactory()).build();
JsonAdapter<SortedSet<String>> jsonAdapter =
moshi.adapter(Types.newParameterizedType(SortedSet.class, String.class));
TreeSet<String> model = new TreeSet<>();
model.add("a");
model.add("b");
model.add("c");
String json = jsonAdapter.toJson(model);
System.out.println(json);
}
/**
* This | CustomAdapterFactory |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/sql_load_script/ImportSqlLoadScriptAsZipFileTestCase.java | {
"start": 300,
"end": 951
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyEntity.class, SqlLoadScriptTestResource.class)
.addAsResource("application-load-script-as-zip-file-test.properties", "application.properties")
.addAsResource("load-script-test.zip"));
@Test
public void testSqlLoadScriptAsZipFile() {
String name = "other-load-script sql load script entity";
RestAssured.when().get("/orm-sql-load-script/3").then().body(Matchers.is(name));
}
}
| ImportSqlLoadScriptAsZipFileTestCase |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/repeatable/Mapper.java | {
"start": 6108,
"end": 6708
} | class ____ {
public static String getUserUsingProvider() {
return "SELECT id, name, 'DERBY' as databaseName FROM users WHERE id = #{id}";
}
public static String insertUserUsingProvider() {
return "INSERT INTO users (id, name) VALUES(#{id}, #{name} || ' DERBY')";
}
public static String updateUserNameUsingProvider() {
return "UPDATE users SET name = name || ' DERBY' WHERE id = #{id}";
}
public static String delete() {
return "DELETE FROM users WHERE name LIKE '%DERBY%'";
}
private DerbySqlProvider() {
}
}
final | DerbySqlProvider |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdResponse.java | {
"start": 1473,
"end": 2627
} | class ____ extends AbstractResponse {
private final InitProducerIdResponseData data;
public InitProducerIdResponse(InitProducerIdResponseData data) {
super(ApiKeys.INIT_PRODUCER_ID);
this.data = data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(Errors.forCode(data.errorCode()));
}
@Override
public InitProducerIdResponseData data() {
return data;
}
public static InitProducerIdResponse parse(Readable readable, short version) {
return new InitProducerIdResponse(new InitProducerIdResponseData(readable, version));
}
@Override
public String toString() {
return data.toString();
}
public Errors error() {
return Errors.forCode(data.errorCode());
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 1;
}
}
| InitProducerIdResponse |
java | apache__rocketmq | proxy/src/main/java/org/apache/rocketmq/proxy/grpc/v2/channel/GrpcClientChannel.java | {
"start": 3190,
"end": 5886
} | class ____ extends ProxyChannel implements ChannelExtendAttributeGetter, RemoteChannelConverter {
private static final Logger log = LoggerFactory.getLogger(LoggerName.PROXY_LOGGER_NAME);
private final GrpcChannelManager grpcChannelManager;
private final GrpcClientSettingsManager grpcClientSettingsManager;
private final AtomicReference<StreamObserver<TelemetryCommand>> telemetryCommandRef = new AtomicReference<>();
private final Object telemetryWriteLock = new Object();
private final String clientId;
public GrpcClientChannel(ProxyRelayService proxyRelayService, GrpcClientSettingsManager grpcClientSettingsManager,
GrpcChannelManager grpcChannelManager, ProxyContext ctx, String clientId) {
super(proxyRelayService, null, new GrpcChannelId(clientId),
ctx.getRemoteAddress(),
ctx.getLocalAddress());
this.grpcChannelManager = grpcChannelManager;
this.grpcClientSettingsManager = grpcClientSettingsManager;
this.clientId = clientId;
}
@Override
public String getChannelExtendAttribute() {
Settings settings = this.grpcClientSettingsManager.getRawClientSettings(this.clientId);
if (settings == null) {
return null;
}
try {
return JsonFormat.printer().print(settings);
} catch (InvalidProtocolBufferException e) {
log.error("convert settings to json data failed. settings:{}", settings, e);
}
return null;
}
public static Settings parseChannelExtendAttribute(Channel channel) {
if (ChannelHelper.getChannelProtocolType(channel).equals(ChannelProtocolType.GRPC_V2) &&
channel instanceof ChannelExtendAttributeGetter) {
String attr = ((ChannelExtendAttributeGetter) channel).getChannelExtendAttribute();
if (attr == null) {
return null;
}
Settings.Builder builder = Settings.newBuilder();
try {
JsonFormat.parser().merge(attr, builder);
return builder.build();
} catch (InvalidProtocolBufferException e) {
log.error("convert settings json data to settings failed. data:{}", attr, e);
return null;
}
}
return null;
}
@Override
public RemoteChannel toRemoteChannel() {
return new RemoteChannel(
ConfigurationManager.getProxyConfig().getLocalServeAddr(),
this.getRemoteAddress(),
this.getLocalAddress(),
ChannelProtocolType.GRPC_V2,
this.getChannelExtendAttribute());
}
protected static | GrpcClientChannel |
java | apache__flink | flink-end-to-end-tests/flink-queryable-state-test/src/main/java/org/apache/flink/streaming/tests/queryablestate/LabelSurrogate.java | {
"start": 893,
"end": 1498
} | class ____ {
private Type type;
private String foo;
public LabelSurrogate(Type type, String foo) {
this.type = type;
this.foo = foo;
}
public Type getType() {
return type;
}
public void setType(Type type) {
this.type = type;
}
public String getFoo() {
return foo;
}
public void setFoo(String foo) {
this.foo = foo;
}
@Override
public String toString() {
return "LabelSurrogate{" + "type=" + type + ", foo='" + foo + '\'' + '}';
}
/** An exemplary enum. */
public | LabelSurrogate |
java | elastic__elasticsearch | x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankMultiShardIT.java | {
"start": 1353,
"end": 70745
} | class ____ extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(RRFRankPlugin.class);
}
@Override
protected int minimumNumberOfShards() {
return 2;
}
@Override
protected int maximumNumberOfShards() {
return 7;
}
@Override
protected int maximumNumberOfReplicas() {
return 0;
}
@Override
public void setupSuiteScopeCluster() throws Exception {
// Set up an index with a very small number of documents to
// test sizing limits and issues with empty data.
XContentBuilder builder = XContentFactory.jsonBuilder()
.startObject()
.startObject("properties")
.startObject("vector")
.field("type", "dense_vector")
.field("dims", 1)
.field("index", true)
.field("similarity", "l2_norm")
.endObject()
.startObject("text")
.field("type", "text")
.endObject()
.endObject()
.endObject();
assertAcked(prepareCreate("tiny_index").setMapping(builder));
ensureGreen("tiny_index");
prepareIndex("tiny_index").setSource("vector", new float[] { 0.0f }, "text", "term term").get();
prepareIndex("tiny_index").setSource("vector", new float[] { 1.0f }, "text", "other").get();
prepareIndex("tiny_index").setSource("vector", new float[] { 2.0f }, "text", "term").get();
indicesAdmin().prepareRefresh("tiny_index").get();
// Set up an index with non-random data, so we can
// do direct tests against expected results.
builder = XContentFactory.jsonBuilder()
.startObject()
.startObject("properties")
.startObject("vector_asc")
.field("type", "dense_vector")
.field("dims", 1)
.field("index", true)
.field("similarity", "l2_norm")
.endObject()
.startObject("vector_desc")
.field("type", "dense_vector")
.field("dims", 1)
.field("index", true)
.field("similarity", "l2_norm")
.endObject()
.startObject("int")
.field("type", "integer")
.endObject()
.startObject("text0")
.field("type", "text")
.endObject()
.startObject("text1")
.field("type", "text")
.endObject()
.endObject()
.endObject();
assertAcked(prepareCreate("nrd_index").setMapping(builder));
ensureGreen(TimeValue.timeValueSeconds(120), "nrd_index");
for (int doc = 0; doc < 1001; ++doc) {
prepareIndex("nrd_index").setSource(
"vector_asc",
new float[] { doc },
"vector_desc",
new float[] { 1000 - doc },
"int",
doc % 3,
"text0",
"term " + doc,
"text1",
"term " + (1000 - doc)
).get();
}
indicesAdmin().prepareRefresh("nrd_index").get();
}
public void testTotalDocsSmallerThanSize() {
float[] queryVector = { 0.0f };
KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector", queryVector, 3, 3, 10f, null, null);
assertResponse(
prepareSearch("tiny_index").setRankBuilder(new RRFRankBuilder(100, 1))
.setKnnSearch(List.of(knnSearch))
.setQuery(QueryBuilders.termQuery("text", "term"))
.addFetchField("vector")
.addFetchField("text"),
response -> {
// we cast to Number when looking at values in vector fields because different xContentTypes may return Float or Double
assertEquals(3, response.getHits().getHits().length);
SearchHit hit = response.getHits().getAt(0);
assertEquals(1, hit.getRank());
assertEquals(0.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0);
assertEquals("term term", hit.field("text").getValue());
hit = response.getHits().getAt(1);
assertEquals(2, hit.getRank());
assertEquals(2.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0);
assertEquals("term", hit.field("text").getValue());
hit = response.getHits().getAt(2);
assertEquals(3, hit.getRank());
assertEquals(1.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0);
assertEquals("other", hit.field("text").getValue());
}
);
}
public void testBM25AndKnn() {
float[] queryVector = { 500.0f };
KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, 10f, null, null);
assertResponse(
prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1))
.setTrackTotalHits(false)
.setKnnSearch(List.of(knnSearch))
.setQuery(
QueryBuilders.boolQuery()
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "500")).boost(11.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "499")).boost(10.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "498")).boost(9.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "497")).boost(8.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "496")).boost(7.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "495")).boost(6.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "494")).boost(5.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "493")).boost(4.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "492")).boost(3.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "491")).boost(2.0f))
)
.addFetchField("vector_asc")
.addFetchField("text0")
.setSize(11),
response -> {
assertNull(response.getHits().getTotalHits());
assertEquals(11, response.getHits().getHits().length);
SearchHit hit = response.getHits().getAt(0);
assertEquals(1, hit.getRank());
assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
assertEquals("term 500", hit.field("text0").getValue());
Set<Double> vectors = Arrays.stream(response.getHits().getHits())
.map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
.collect(Collectors.toSet());
assertEquals(Set.of(492.0, 493.0, 494.0, 495.0, 496.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0), vectors);
}
);
}
public void testMultipleOnlyKnn() {
float[] queryVectorAsc = { 500.0f };
float[] queryVectorDesc = { 500.0f };
KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 51, 1001, 10f, null, null);
KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 51, 1001, 10f, null, null);
assertResponse(
prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(51, 1))
.setTrackTotalHits(true)
.setKnnSearch(List.of(knnSearchAsc, knnSearchDesc))
.addFetchField("vector_asc")
.addFetchField("text0")
.setSize(19),
response -> {
assertEquals(51, response.getHits().getTotalHits().value());
assertEquals(19, response.getHits().getHits().length);
SearchHit hit = response.getHits().getAt(0);
assertEquals(1, hit.getRank());
assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
assertEquals("term 500", hit.field("text0").getValue());
Set<Double> vectors = Arrays.stream(response.getHits().getHits())
.map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
.collect(Collectors.toSet());
assertEquals(
Set.of(
491.0,
492.0,
493.0,
494.0,
495.0,
496.0,
497.0,
498.0,
499.0,
500.0,
501.0,
502.0,
503.0,
504.0,
505.0,
506.0,
507.0,
508.0,
509.0
),
vectors
);
}
);
}
public void testBM25AndMultipleKnn() {
float[] queryVectorAsc = { 500.0f };
float[] queryVectorDesc = { 500.0f };
KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 51, 1001, 10f, null, null);
KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 51, 1001, 10f, null, null);
assertResponse(
prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(51, 1))
.setTrackTotalHits(false)
.setKnnSearch(List.of(knnSearchAsc, knnSearchDesc))
.setQuery(
QueryBuilders.boolQuery()
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "500")).boost(10.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "499")).boost(20.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "498")).boost(8.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "497")).boost(7.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "496")).boost(6.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "485")).boost(5.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "494")).boost(4.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "506")).boost(3.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "505")).boost(2.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "511")).boost(9.0f))
)
.addFetchField("vector_asc")
.addFetchField("vector_desc")
.addFetchField("text0")
.setSize(19),
response -> {
assertNull(response.getHits().getTotalHits());
assertEquals(19, response.getHits().getHits().length);
SearchHit hit = response.getHits().getAt(0);
assertEquals(1, hit.getRank());
assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0);
assertEquals("term 500", hit.field("text0").getValue());
hit = response.getHits().getAt(1);
assertEquals(2, hit.getRank());
assertEquals(499.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
assertEquals(501.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0);
assertEquals("term 499", hit.field("text0").getValue());
Set<Double> vectors = Arrays.stream(response.getHits().getHits())
.map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
.collect(Collectors.toSet());
assertEquals(
Set.of(
485.0,
492.0,
493.0,
494.0,
495.0,
496.0,
497.0,
498.0,
499.0,
500.0,
501.0,
502.0,
503.0,
504.0,
505.0,
506.0,
507.0,
508.0,
511.0
),
vectors
);
}
);
}
public void testBM25AndKnnWithBucketAggregation() {
float[] queryVector = { 500.0f };
KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, 10f, null, null);
assertResponse(
prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1))
.setTrackTotalHits(true)
.setKnnSearch(List.of(knnSearch))
.setQuery(
QueryBuilders.boolQuery()
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "500")).boost(11.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "499")).boost(10.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "498")).boost(9.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "497")).boost(8.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "496")).boost(7.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "495")).boost(6.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "494")).boost(5.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "493")).boost(4.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "492")).boost(3.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "491")).boost(2.0f))
)
.addFetchField("vector_asc")
.addFetchField("text0")
.setSize(11)
.addAggregation(AggregationBuilders.terms("sums").field("int")),
response -> {
assertEquals(101, response.getHits().getTotalHits().value());
assertEquals(11, response.getHits().getHits().length);
SearchHit hit = response.getHits().getAt(0);
assertEquals(1, hit.getRank());
assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
assertEquals("term 500", hit.field("text0").getValue());
Set<Double> vectors = Arrays.stream(response.getHits().getHits())
.map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
.collect(Collectors.toSet());
assertEquals(Set.of(492.0, 493.0, 494.0, 495.0, 496.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0), vectors);
LongTerms aggregation = response.getAggregations().get("sums");
assertEquals(3, aggregation.getBuckets().size());
for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
if (0L == (long) bucket.getKey()) {
assertEquals(34, bucket.getDocCount());
} else if (1L == (long) bucket.getKey()) {
assertEquals(34, bucket.getDocCount());
} else if (2L == (long) bucket.getKey()) {
assertEquals(33, bucket.getDocCount());
} else {
throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
}
}
}
);
}
public void testMultipleOnlyKnnWithAggregation() {
float[] queryVectorAsc = { 500.0f };
float[] queryVectorDesc = { 500.0f };
KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 51, 1001, 10f, null, null);
KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 51, 1001, 10f, null, null);
assertResponse(
prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(51, 1))
.setTrackTotalHits(false)
.setKnnSearch(List.of(knnSearchAsc, knnSearchDesc))
.addFetchField("vector_asc")
.addFetchField("text0")
.setSize(19)
.addAggregation(AggregationBuilders.terms("sums").field("int")),
response -> {
assertNull(response.getHits().getTotalHits());
assertEquals(19, response.getHits().getHits().length);
SearchHit hit = response.getHits().getAt(0);
assertEquals(1, hit.getRank());
assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
assertEquals("term 500", hit.field("text0").getValue());
Set<Double> vectors = Arrays.stream(response.getHits().getHits())
.map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
.collect(Collectors.toSet());
assertEquals(
Set.of(
491.0,
492.0,
493.0,
494.0,
495.0,
496.0,
497.0,
498.0,
499.0,
500.0,
501.0,
502.0,
503.0,
504.0,
505.0,
506.0,
507.0,
508.0,
509.0
),
vectors
);
LongTerms aggregation = response.getAggregations().get("sums");
assertEquals(3, aggregation.getBuckets().size());
for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
if (0L == (long) bucket.getKey()) {
assertEquals(17, bucket.getDocCount());
} else if (1L == (long) bucket.getKey()) {
assertEquals(17, bucket.getDocCount());
} else if (2L == (long) bucket.getKey()) {
assertEquals(17, bucket.getDocCount());
} else {
throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
}
}
}
);
}
public void testBM25AndMultipleKnnWithAggregation() {
float[] queryVectorAsc = { 500.0f };
float[] queryVectorDesc = { 500.0f };
KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 51, 1001, 10f, null, null);
KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 51, 1001, 10f, null, null);
assertResponse(
prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(51, 1))
.setTrackTotalHits(true)
.setKnnSearch(List.of(knnSearchAsc, knnSearchDesc))
.setQuery(
QueryBuilders.boolQuery()
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "500")).boost(10.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "499")).boost(20.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "498")).boost(8.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "497")).boost(7.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "496")).boost(6.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "485")).boost(5.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "494")).boost(4.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "506")).boost(3.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "505")).boost(2.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "511")).boost(9.0f))
)
.addFetchField("vector_asc")
.addFetchField("vector_desc")
.addFetchField("text0")
.setSize(19)
.addAggregation(AggregationBuilders.terms("sums").field("int"))
.setStats("search"),
response -> {
assertEquals(51, response.getHits().getTotalHits().value());
assertEquals(19, response.getHits().getHits().length);
SearchHit hit = response.getHits().getAt(0);
assertEquals(1, hit.getRank());
assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0);
assertEquals("term 500", hit.field("text0").getValue());
hit = response.getHits().getAt(1);
assertEquals(2, hit.getRank());
assertEquals(499.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
assertEquals(501.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0);
assertEquals("term 499", hit.field("text0").getValue());
Set<Double> vectors = Arrays.stream(response.getHits().getHits())
.map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
.collect(Collectors.toSet());
assertEquals(
Set.of(
485.0,
492.0,
493.0,
494.0,
495.0,
496.0,
497.0,
498.0,
499.0,
500.0,
501.0,
502.0,
503.0,
504.0,
505.0,
506.0,
507.0,
508.0,
511.0
),
vectors
);
LongTerms aggregation = response.getAggregations().get("sums");
assertEquals(3, aggregation.getBuckets().size());
for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
if (0L == (long) bucket.getKey()) {
assertEquals(17, bucket.getDocCount());
} else if (1L == (long) bucket.getKey()) {
assertEquals(17, bucket.getDocCount());
} else if (2L == (long) bucket.getKey()) {
assertEquals(17, bucket.getDocCount());
} else {
throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
}
}
}
);
}
public void testMultiBM25() {
for (SearchType searchType : SearchType.CURRENTLY_SUPPORTED) {
assertResponse(
prepareSearch("nrd_index").setSearchType(searchType)
.setRankBuilder(new RRFRankBuilder(8, 1))
.setTrackTotalHits(false)
.setSubSearches(
List.of(
new SubSearchSourceBuilder(
QueryBuilders.boolQuery()
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "500")).boost(10.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "499")).boost(9.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "498")).boost(8.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "497")).boost(7.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "496")).boost(6.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "495")).boost(5.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "494")).boost(4.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "492")).boost(3.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "491")).boost(2.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "490")).boost(1.0f))
),
new SubSearchSourceBuilder(
QueryBuilders.boolQuery()
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "508")).boost(9.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "304")).boost(8.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "501")).boost(7.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "504")).boost(6.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "502")).boost(5.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "499")).boost(4.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "800")).boost(3.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "201")).boost(2.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "492")).boost(1.0f))
)
)
)
.addFetchField("text0")
.addFetchField("text1")
.setSize(5),
response -> {
assertNull(response.getHits().getTotalHits());
assertEquals(5, response.getHits().getHits().length);
SearchHit hit = response.getHits().getAt(0);
assertEquals(1, hit.getRank());
assertEquals("term 492", hit.field("text0").getValue());
assertEquals("term 508", hit.field("text1").getValue());
hit = response.getHits().getAt(1);
assertEquals(2, hit.getRank());
assertEquals("term 499", hit.field("text0").getValue());
assertEquals("term 501", hit.field("text1").getValue());
hit = response.getHits().getAt(2);
assertEquals(3, hit.getRank());
assertEquals("term 500", hit.field("text0").getValue());
assertEquals("term 500", hit.field("text1").getValue());
hit = response.getHits().getAt(3);
assertEquals(4, hit.getRank());
assertEquals("term 498", hit.field("text0").getValue());
assertEquals("term 502", hit.field("text1").getValue());
hit = response.getHits().getAt(4);
assertEquals(5, hit.getRank());
assertEquals("term 496", hit.field("text0").getValue());
assertEquals("term 504", hit.field("text1").getValue());
}
);
}
}
public void testMultiBM25WithAggregation() {
for (SearchType searchType : SearchType.CURRENTLY_SUPPORTED) {
assertResponse(
prepareSearch("nrd_index").setSearchType(searchType)
.setRankBuilder(new RRFRankBuilder(8, 1))
.setTrackTotalHits(false)
.setSubSearches(
List.of(
new SubSearchSourceBuilder(
QueryBuilders.boolQuery()
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "500")).boost(10.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "499")).boost(9.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "498")).boost(8.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "497")).boost(7.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "496")).boost(6.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "495")).boost(5.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "494")).boost(4.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "492")).boost(3.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "491")).boost(2.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "490")).boost(1.0f))
),
new SubSearchSourceBuilder(
QueryBuilders.boolQuery()
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "508")).boost(9.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "304")).boost(8.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "501")).boost(7.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "504")).boost(6.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "502")).boost(5.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "499")).boost(4.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "801")).boost(3.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "201")).boost(2.0f))
.should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "492")).boost(1.0f))
)
)
)
.addFetchField("text0")
.addFetchField("text1")
.setSize(5)
.addAggregation(AggregationBuilders.terms("sums").field("int")),
response -> {
assertNull(response.getHits().getTotalHits());
assertEquals(5, response.getHits().getHits().length);
SearchHit hit = response.getHits().getAt(0);
assertEquals(1, hit.getRank());
assertEquals("term 492", hit.field("text0").getValue());
assertEquals("term 508", hit.field("text1").getValue());
hit = response.getHits().getAt(1);
assertEquals(2, hit.getRank());
assertEquals("term 499", hit.field("text0").getValue());
assertEquals("term 501", hit.field("text1").getValue());
hit = response.getHits().getAt(2);
assertEquals(3, hit.getRank());
assertEquals("term 500", hit.field("text0").getValue());
assertEquals("term 500", hit.field("text1").getValue());
hit = response.getHits().getAt(3);
assertEquals(4, hit.getRank());
assertEquals("term 498", hit.field("text0").getValue());
assertEquals("term 502", hit.field("text1").getValue());
hit = response.getHits().getAt(4);
assertEquals(5, hit.getRank());
assertEquals("term 496", hit.field("text0").getValue());
assertEquals("term 504", hit.field("text1").getValue());
LongTerms aggregation = response.getAggregations().get("sums");
assertEquals(3, aggregation.getBuckets().size());
for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
if (0L == (long) bucket.getKey()) {
assertEquals(5, bucket.getDocCount());
} else if (1L == (long) bucket.getKey()) {
assertEquals(6, bucket.getDocCount());
} else if (2L == (long) bucket.getKey()) {
assertEquals(4, bucket.getDocCount());
} else {
throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
}
}
}
);
}
}
/**
 * Fuses two boosted BM25 sub-searches with a single kNN search (field {@code vector_asc},
 * query vector [500]) using RRF (rank window 101, rank constant 1) and verifies the top-5
 * fused hits. Total-hit tracking is disabled, so {@code getTotalHits()} must be null.
 */
public void testMultiBM25AndSingleKnn() {
    float[] queryVector = { 500.0f };
    KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, 10f, null, null);
    assertResponse(
        prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1))
            .setTrackTotalHits(false)
            .setKnnSearch(List.of(knnSearch))
            .setSubSearches(
                List.of(
                    new SubSearchSourceBuilder(
                        // descending boosts impose a deterministic per-query ranking over text0
                        QueryBuilders.boolQuery()
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "500")).boost(10.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "499")).boost(9.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "498")).boost(8.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "497")).boost(7.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "496")).boost(6.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "495")).boost(5.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "494")).boost(4.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "492")).boost(3.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "491")).boost(2.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "490")).boost(1.0f))
                    ),
                    new SubSearchSourceBuilder(
                        QueryBuilders.boolQuery()
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "508")).boost(9.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "304")).boost(8.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "501")).boost(7.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "504")).boost(6.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "492")).boost(5.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "502")).boost(4.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "499")).boost(3.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "800")).boost(2.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "201")).boost(1.0f))
                    )
                )
            )
            .addFetchField("text0")
            .addFetchField("text1")
            .addFetchField("vector_asc")
            .setSize(5),
        response -> {
            assertNull(response.getHits().getTotalHits());
            assertEquals(5, response.getHits().getHits().length);
            // top fused hit must be the doc matching both text queries and closest to the vector
            SearchHit hit = response.getHits().getAt(0);
            assertEquals(1, hit.getRank());
            assertEquals("term 500", hit.field("text0").getValue());
            assertEquals("term 500", hit.field("text1").getValue());
            // cast to Number: different xContentTypes may return Float or Double for vector fields
            assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
            // remaining hits are order-insensitive here, so compare as a set of vector values
            Set<Double> vectors = Arrays.stream(response.getHits().getHits())
                .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
                .collect(Collectors.toSet());
            assertEquals(Set.of(492.0, 496.0, 498.0, 499.0, 500.0), vectors);
        }
    );
}
/**
 * Same setup as {@code testMultiBM25AndSingleKnn} (two boosted BM25 sub-searches + one kNN
 * search fused via RRF), additionally attaching a terms aggregation on the "int" field and
 * verifying its bucket counts alongside the top-5 fused hits.
 */
public void testMultiBM25AndSingleKnnWithAggregation() {
    float[] queryVector = { 500.0f };
    KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, 10f, null, null);
    assertResponse(
        prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1))
            .setTrackTotalHits(false)
            .setKnnSearch(List.of(knnSearch))
            .setSubSearches(
                List.of(
                    new SubSearchSourceBuilder(
                        QueryBuilders.boolQuery()
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "500")).boost(10.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "499")).boost(9.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "498")).boost(8.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "497")).boost(7.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "496")).boost(6.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "495")).boost(5.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "494")).boost(4.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "492")).boost(3.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "491")).boost(2.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "490")).boost(1.0f))
                    ),
                    new SubSearchSourceBuilder(
                        QueryBuilders.boolQuery()
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "508")).boost(9.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "304")).boost(8.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "501")).boost(7.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "504")).boost(6.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "492")).boost(5.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "502")).boost(4.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "499")).boost(3.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "800")).boost(2.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "201")).boost(1.0f))
                    )
                )
            )
            .addFetchField("text0")
            .addFetchField("text1")
            .addFetchField("vector_asc")
            .setSize(5)
            .addAggregation(AggregationBuilders.terms("sums").field("int")),
        response -> {
            assertNull(response.getHits().getTotalHits());
            assertEquals(5, response.getHits().getHits().length);
            SearchHit hit = response.getHits().getAt(0);
            assertEquals(1, hit.getRank());
            assertEquals("term 500", hit.field("text0").getValue());
            assertEquals("term 500", hit.field("text1").getValue());
            // cast to Number: different xContentTypes may return Float or Double for vector fields
            assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
            Set<Double> vectors = Arrays.stream(response.getHits().getHits())
                .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
                .collect(Collectors.toSet());
            assertEquals(Set.of(492.0, 496.0, 498.0, 499.0, 500.0), vectors);
            // aggregation buckets count every document collected by the query,
            // not just the size-5 page of returned hits
            LongTerms aggregation = response.getAggregations().get("sums");
            assertEquals(3, aggregation.getBuckets().size());
            for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
                if (0L == (long) bucket.getKey()) {
                    assertEquals(35, bucket.getDocCount());
                } else if (1L == (long) bucket.getKey()) {
                    assertEquals(35, bucket.getDocCount());
                } else if (2L == (long) bucket.getKey()) {
                    assertEquals(34, bucket.getDocCount());
                } else {
                    throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
                }
            }
        }
    );
}
/**
 * Fuses two boosted BM25 sub-searches with TWO kNN searches — on {@code vector_asc} and
 * {@code vector_desc}, both querying for [500] — using RRF (rank window 101, rank constant 1),
 * and verifies the top-5 fused hits. Total-hit tracking is disabled.
 */
public void testMultiBM25AndMultipleKnn() {
    float[] queryVectorAsc = { 500.0f };
    float[] queryVectorDesc = { 500.0f };
    KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 101, 1001, 10f, null, null);
    KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 101, 1001, 10f, null, null);
    assertResponse(
        prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1))
            .setTrackTotalHits(false)
            .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc))
            .setSubSearches(
                List.of(
                    new SubSearchSourceBuilder(
                        QueryBuilders.boolQuery()
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "500")).boost(10.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "499")).boost(9.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "498")).boost(8.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "497")).boost(7.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "496")).boost(6.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "495")).boost(5.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "494")).boost(4.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "492")).boost(3.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "491")).boost(2.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "490")).boost(1.0f))
                    ),
                    new SubSearchSourceBuilder(
                        QueryBuilders.boolQuery()
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "508")).boost(9.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "304")).boost(8.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "501")).boost(7.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "504")).boost(6.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "492")).boost(5.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "502")).boost(4.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "499")).boost(3.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "800")).boost(2.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "201")).boost(1.0f))
                    )
                )
            )
            .addFetchField("text0")
            .addFetchField("text1")
            .addFetchField("vector_asc")
            .addFetchField("vector_desc")
            .setSize(5),
        response -> {
            assertNull(response.getHits().getTotalHits());
            assertEquals(5, response.getHits().getHits().length);
            SearchHit hit = response.getHits().getAt(0);
            assertEquals(1, hit.getRank());
            assertEquals("term 500", hit.field("text0").getValue());
            assertEquals("term 500", hit.field("text1").getValue());
            // cast to Number: different xContentTypes may return Float or Double for vector fields
            assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
            assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0);
            // with a second kNN query the fused top-5 differs from the single-kNN case (501 enters)
            Set<Double> vectors = Arrays.stream(response.getHits().getHits())
                .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
                .collect(Collectors.toSet());
            assertEquals(Set.of(492.0, 498.0, 499.0, 500.0, 501.0), vectors);
        }
    );
}
/**
 * Same setup as {@code testMultiBM25AndMultipleKnn} (two BM25 sub-searches + two kNN searches
 * fused via RRF), additionally attaching a terms aggregation on the "int" field and verifying
 * its bucket counts alongside the top-5 fused hits.
 */
public void testMultiBM25AndMultipleKnnWithAggregation() {
    float[] queryVectorAsc = { 500.0f };
    float[] queryVectorDesc = { 500.0f };
    KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 101, 1001, 10f, null, null);
    KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 101, 1001, 10f, null, null);
    assertResponse(
        prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1))
            .setTrackTotalHits(false)
            .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc))
            .setSubSearches(
                List.of(
                    new SubSearchSourceBuilder(
                        QueryBuilders.boolQuery()
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "500")).boost(10.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "499")).boost(9.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "498")).boost(8.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "497")).boost(7.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "496")).boost(6.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "495")).boost(5.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "494")).boost(4.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "492")).boost(3.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "491")).boost(2.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text0", "490")).boost(1.0f))
                    ),
                    new SubSearchSourceBuilder(
                        QueryBuilders.boolQuery()
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "508")).boost(9.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "304")).boost(8.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "501")).boost(7.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "504")).boost(6.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "492")).boost(5.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "502")).boost(4.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "499")).boost(3.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "800")).boost(2.0f))
                            .should(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("text1", "201")).boost(1.0f))
                    )
                )
            )
            .addFetchField("text0")
            .addFetchField("text1")
            .addFetchField("vector_asc")
            .addFetchField("vector_desc")
            .setSize(5)
            .addAggregation(AggregationBuilders.terms("sums").field("int")),
        response -> {
            assertNull(response.getHits().getTotalHits());
            assertEquals(5, response.getHits().getHits().length);
            SearchHit hit = response.getHits().getAt(0);
            assertEquals(1, hit.getRank());
            assertEquals("term 500", hit.field("text0").getValue());
            assertEquals("term 500", hit.field("text1").getValue());
            // cast to Number: different xContentTypes may return Float or Double for vector fields
            assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
            assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0);
            Set<Double> vectors = Arrays.stream(response.getHits().getHits())
                .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
                .collect(Collectors.toSet());
            assertEquals(Set.of(492.0, 498.0, 499.0, 500.0, 501.0), vectors);
            // aggregation buckets count every document collected by the query,
            // not just the size-5 page of returned hits
            LongTerms aggregation = response.getAggregations().get("sums");
            assertEquals(3, aggregation.getBuckets().size());
            for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
                if (0L == (long) bucket.getKey()) {
                    assertEquals(35, bucket.getDocCount());
                } else if (1L == (long) bucket.getKey()) {
                    assertEquals(35, bucket.getDocCount());
                } else if (2L == (long) bucket.getKey()) {
                    assertEquals(34, bucket.getDocCount());
                } else {
                    throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
                }
            }
        }
    );
}
/**
 * Verifies the RRF explain output for a top-level kNN query (vector [9], named
 * "my_knn_search") combined with a term query on {@code text0: 10}. The first hit matches
 * both queries and must show a match in both explanation details; the remaining hits match
 * only the kNN query, so the term-query detail must report "result not found".
 */
public void testBasicRRFExplain() {
    // our query here is a top-level knn query for vector [9] and a term query for "text0: 10"
    // the first result should be the one present in both queries (i.e. doc with text0: 10 and vector: [10]) and the other ones
    // should only match the knn query
    float[] queryVector = { 9f };
    KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, 10f, null, null).queryName("my_knn_search");
    assertResponse(
        prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(100, 1))
            .setKnnSearch(List.of(knnSearch))
            .setQuery(QueryBuilders.termQuery("text0", "10"))
            .setExplain(true)
            .setSize(3),
        response -> {
            // we cast to Number when looking at values in vector fields because different xContentTypes may return Float or Double
            assertEquals(3, response.getHits().getHits().length);
            // first result is the one which matches the term (10) so we should expect an explanation for both queries
            SearchHit hit = response.getHits().getAt(0);
            assertEquals(1, hit.getRank());
            assertTrue(hit.getExplanation().isMatch());
            assertTrue(hit.getExplanation().getDescription().contains("initial ranks"));
            assertEquals(2, hit.getExplanation().getDetails().length);
            // details[0]: the term query, details[1]: the named knn query
            assertTrue(hit.getExplanation().getDetails()[0].isMatch());
            assertEquals(1, hit.getExplanation().getDetails()[0].getValue().intValue());
            assertTrue(hit.getExplanation().getDetails()[0].getDescription().contains("query at index [0]"));
            assertTrue(hit.getExplanation().getDetails()[0].getDetails().length > 0);
            assertTrue(hit.getExplanation().getDetails()[1].isMatch());
            assertTrue(hit.getExplanation().getDetails()[1].getDescription().contains("[my_knn_search]"));
            assertTrue(hit.getExplanation().getDetails()[1].getDetails().length > 0);
            // second result matched only on the knn query so no match should be expected for the term query
            hit = response.getHits().getAt(1);
            assertEquals(2, hit.getRank());
            assertTrue(hit.getExplanation().isMatch());
            assertTrue(hit.getExplanation().getDescription().contains("initial ranks"));
            assertEquals(2, hit.getExplanation().getDetails().length);
            assertFalse(hit.getExplanation().getDetails()[0].isMatch());
            assertEquals(0, hit.getExplanation().getDetails()[0].getValue().intValue());
            assertEquals(
                "rrf score: [0], result not found in query at index [0]",
                hit.getExplanation().getDetails()[0].getDescription()
            );
            assertEquals(0, hit.getExplanation().getDetails()[0].getDetails().length);
            assertTrue(hit.getExplanation().getDetails()[1].isMatch());
            assertTrue(hit.getExplanation().getDetails()[1].getDescription().contains("[my_knn_search]"));
            assertTrue(hit.getExplanation().getDetails()[1].getDetails().length > 0);
            // third result matched only on the knn query so no match should be expected for the term query
            hit = response.getHits().getAt(2);
            assertEquals(3, hit.getRank());
            assertTrue(hit.getExplanation().isMatch());
            assertTrue(hit.getExplanation().getDescription().contains("initial ranks"));
            assertEquals(2, hit.getExplanation().getDetails().length);
            assertFalse(hit.getExplanation().getDetails()[0].isMatch());
            assertEquals(0, hit.getExplanation().getDetails()[0].getValue().intValue());
            assertEquals(
                "rrf score: [0], result not found in query at index [0]",
                hit.getExplanation().getDetails()[0].getDescription()
            );
            assertEquals(0, hit.getExplanation().getDetails()[0].getDetails().length);
            assertTrue(hit.getExplanation().getDetails()[1].isMatch());
            assertTrue(hit.getExplanation().getDetails()[1].getDescription().contains("[my_knn_search]"));
            assertTrue(hit.getExplanation().getDetails()[1].getDetails().length > 0);
        }
    );
}
/**
 * Verifies the RRF explain output when the non-kNN query targets an unknown field and is
 * therefore rewritten to a MatchNoneQuery: every hit must come from the kNN query alone,
 * with the term-query detail reporting "result not found" for all three hits.
 */
public void testRRFExplainUnknownField() {
    // in this test we try knn with a query on an unknown field that would be rewritten to MatchNoneQuery
    // so we expect results and explanations only for the first part
    float[] queryVector = { 9f };
    KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, 10f, null, null).queryName("my_knn_search");
    assertResponse(
        prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(100, 1))
            .setKnnSearch(List.of(knnSearch))
            .setQuery(QueryBuilders.termQuery("unknown_field", "10"))
            .setExplain(true)
            .setSize(3),
        response -> {
            // we cast to Number when looking at values in vector fields because different xContentTypes may return Float or Double
            assertEquals(3, response.getHits().getHits().length);
            SearchHit hit = response.getHits().getAt(0);
            assertEquals(1, hit.getRank());
            assertTrue(hit.getExplanation().isMatch());
            assertTrue(hit.getExplanation().getDescription().contains("initial ranks"));
            assertEquals(2, hit.getExplanation().getDetails().length);
            // details[0]: the rewritten MatchNoneQuery — never a match
            assertFalse(hit.getExplanation().getDetails()[0].isMatch());
            assertEquals(0, hit.getExplanation().getDetails()[0].getValue().intValue());
            assertEquals(
                "rrf score: [0], result not found in query at index [0]",
                hit.getExplanation().getDetails()[0].getDescription()
            );
            assertEquals(0, hit.getExplanation().getDetails()[0].getDetails().length);
            // details[1]: the named knn query — the only source of matches
            assertTrue(hit.getExplanation().getDetails()[1].isMatch());
            assertTrue(hit.getExplanation().getDetails()[1].getDescription().contains("[my_knn_search]"));
            assertTrue(hit.getExplanation().getDetails()[1].getDetails().length > 0);
            hit = response.getHits().getAt(1);
            assertEquals(2, hit.getRank());
            assertTrue(hit.getExplanation().isMatch());
            assertTrue(hit.getExplanation().getDescription().contains("initial ranks"));
            assertEquals(2, hit.getExplanation().getDetails().length);
            assertFalse(hit.getExplanation().getDetails()[0].isMatch());
            assertEquals(0, hit.getExplanation().getDetails()[0].getValue().intValue());
            assertEquals(
                "rrf score: [0], result not found in query at index [0]",
                hit.getExplanation().getDetails()[0].getDescription()
            );
            assertEquals(0, hit.getExplanation().getDetails()[0].getDetails().length);
            assertTrue(hit.getExplanation().getDetails()[1].isMatch());
            assertTrue(hit.getExplanation().getDetails()[1].getDescription().contains("[my_knn_search]"));
            assertTrue(hit.getExplanation().getDetails()[1].getDetails().length > 0);
            hit = response.getHits().getAt(2);
            assertEquals(3, hit.getRank());
            assertTrue(hit.getExplanation().isMatch());
            assertTrue(hit.getExplanation().getDescription().contains("initial ranks"));
            assertEquals(2, hit.getExplanation().getDetails().length);
            assertFalse(hit.getExplanation().getDetails()[0].isMatch());
            assertEquals(0, hit.getExplanation().getDetails()[0].getValue().intValue());
            assertEquals(
                "rrf score: [0], result not found in query at index [0]",
                hit.getExplanation().getDetails()[0].getDescription()
            );
            // fixed: was the 3-arg assertEquals(0, ..., 0) double-delta overload, inconsistent
            // with the identical int-length assertions above; use the exact 2-arg form
            assertEquals(0, hit.getExplanation().getDetails()[0].getDetails().length);
            assertTrue(hit.getExplanation().getDetails()[1].isMatch());
            assertTrue(hit.getExplanation().getDetails()[1].getDescription().contains("[my_knn_search]"));
            assertTrue(hit.getExplanation().getDetails()[1].getDetails().length > 0);
        }
    );
}
/**
 * Verifies the RRF explain output with two sub-searches plus a kNN query, where the first
 * sub-search targets an unknown field (rewritten to MatchNoneQuery) and never matches, the
 * second is a term query on {@code text0: 10} that matches only the top hit, and the named
 * kNN query matches everything. Each hit therefore has three explanation details.
 */
public void testRRFExplainOneUnknownFieldSubSearches() {
    // this test is similar to the above with the difference that we have a list of subsearches that one would fail,
    // while the other one would produce a match.
    // So, we'd have a total of 3 queries, a (rewritten) MatchNoneQuery, a TermQuery, and a kNN query
    float[] queryVector = { 9f };
    KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, 10f, null, null).queryName("my_knn_search");
    assertResponse(
        prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(100, 1))
            .setKnnSearch(List.of(knnSearch))
            .setSubSearches(
                List.of(
                    new SubSearchSourceBuilder(QueryBuilders.termQuery("unknown_field", "10")),
                    new SubSearchSourceBuilder(QueryBuilders.termQuery("text0", "10"))
                )
            )
            .setExplain(true)
            .setSize(3),
        response -> {
            // we cast to Number when looking at values in vector fields because different xContentTypes may return Float or Double
            assertEquals(3, response.getHits().getHits().length);
            // first result is the one which matches the term (10) and is 3rd closest to our query vector (9)
            SearchHit hit = response.getHits().getAt(0);
            assertEquals(1, hit.getRank());
            assertTrue(hit.getExplanation().isMatch());
            assertTrue(hit.getExplanation().getDescription().contains("initial ranks"));
            assertEquals(3, hit.getExplanation().getDetails().length);
            // MatchNone query
            assertFalse(hit.getExplanation().getDetails()[0].isMatch());
            assertEquals(0, hit.getExplanation().getDetails()[0].getValue().intValue());
            assertEquals(
                "rrf score: [0], result not found in query at index [0]",
                hit.getExplanation().getDetails()[0].getDescription()
            );
            assertEquals(0, hit.getExplanation().getDetails()[0].getDetails().length);
            // Term query
            assertTrue(hit.getExplanation().getDetails()[1].isMatch());
            assertTrue(hit.getExplanation().getDetails()[1].getDescription().contains("query at index [1]"));
            assertTrue(hit.getExplanation().getDetails()[1].getDetails().length > 0);
            // knn query
            assertTrue(hit.getExplanation().getDetails()[2].isMatch());
            assertTrue(hit.getExplanation().getDetails()[2].getDescription().contains("[my_knn_search]"));
            assertTrue(hit.getExplanation().getDetails()[2].getDetails().length > 0);
            // rest of hits match only on the knn query so no match should be expected for the term query either
            hit = response.getHits().getAt(1);
            assertEquals(2, hit.getRank());
            assertTrue(hit.getExplanation().isMatch());
            assertTrue(hit.getExplanation().getDescription().contains("initial ranks"));
            assertEquals(3, hit.getExplanation().getDetails().length);
            // MatchNone query
            assertFalse(hit.getExplanation().getDetails()[0].isMatch());
            assertEquals(0, hit.getExplanation().getDetails()[0].getValue().intValue());
            assertEquals(
                "rrf score: [0], result not found in query at index [0]",
                hit.getExplanation().getDetails()[0].getDescription()
            );
            // term query - should not match
            assertFalse(hit.getExplanation().getDetails()[1].isMatch());
            assertEquals(
                "rrf score: [0], result not found in query at index [1]",
                hit.getExplanation().getDetails()[1].getDescription()
            );
            assertEquals(0, hit.getExplanation().getDetails()[1].getDetails().length);
            // knn query
            assertTrue(hit.getExplanation().getDetails()[2].isMatch());
            assertTrue(hit.getExplanation().getDetails()[2].getDescription().contains("[my_knn_search]"));
            assertTrue(hit.getExplanation().getDetails()[2].getDetails().length > 0);
            // rest of hits match only on the knn query so no match should be expected for the term query either
            hit = response.getHits().getAt(2);
            assertEquals(3, hit.getRank());
            assertTrue(hit.getExplanation().isMatch());
            assertTrue(hit.getExplanation().getDescription().contains("initial ranks"));
            assertEquals(3, hit.getExplanation().getDetails().length);
            // MatchNone query
            assertFalse(hit.getExplanation().getDetails()[0].isMatch());
            assertEquals(0, hit.getExplanation().getDetails()[0].getValue().intValue());
            assertEquals(
                "rrf score: [0], result not found in query at index [0]",
                hit.getExplanation().getDetails()[0].getDescription()
            );
            // term query - should not match
            assertFalse(hit.getExplanation().getDetails()[1].isMatch());
            assertEquals(
                "rrf score: [0], result not found in query at index [1]",
                hit.getExplanation().getDetails()[1].getDescription()
            );
            assertEquals(0, hit.getExplanation().getDetails()[1].getDetails().length);
            // knn query
            assertTrue(hit.getExplanation().getDetails()[2].isMatch());
            assertTrue(hit.getExplanation().getDetails()[2].getDescription().contains("[my_knn_search]"));
            assertTrue(hit.getExplanation().getDetails()[2].getDetails().length > 0);
        }
    );
}
}
| RRFRankMultiShardIT |
java | apache__rocketmq | tools/src/main/java/org/apache/rocketmq/tools/command/consumer/ConsumerStatusSubCommand.java | {
"start": 1629,
"end": 6955
} | class ____ implements SubCommand {
/**
 * Manual entry point for local testing: points the tool at a name server on
 * 127.0.0.1:9876 and runs this sub-command against the "benchmark_consumer" group.
 */
public static void main(String[] args) {
    System.setProperty(MixAll.NAMESRV_ADDR_PROPERTY, "127.0.0.1:9876");
    MQAdminStartup.main(new String[] {new ConsumerStatusSubCommand().commandName(), "-g", "benchmark_consumer"});
}
/** Returns the CLI name under which this sub-command is invoked. */
@Override
public String commandName() {
    return "consumerStatus";
}
/** Returns the one-line description shown in the tool's help output. */
@Override
public String commandDesc() {
    return "Query consumer's internal data structure.";
}
/**
 * Registers this sub-command's command-line options:
 * -g (required) consumer group, -i client id, -b broker address,
 * -s run jstack in the consumer process.
 */
@Override
public Options buildCommandlineOptions(Options options) {
    // Consumer group is the only mandatory argument.
    Option groupOption = new Option("g", "consumerGroup", true, "consumer group name");
    groupOption.setRequired(true);
    options.addOption(groupOption);

    Option clientIdOption = new Option("i", "clientId", true, "The consumer's client id");
    clientIdOption.setRequired(false);
    options.addOption(clientIdOption);

    Option brokerOption = new Option("b", "brokerAddr", true, "broker address");
    brokerOption.setRequired(false);
    options.addOption(brokerOption);

    // Flag option: no argument value.
    Option jstackOption = new Option("s", "jstack", false, "Run jstack command in the consumer progress");
    jstackOption.setRequired(false);
    options.addOption(jstackOption);

    return options;
}
/**
 * Queries a consumer group's runtime state. Without -i, dumps the running info of every
 * connected client to files under a timestamp-named directory and analyzes subscription
 * consistency and rebalance health across clients; with -i, prints one client's info.
 *
 * @throws SubCommandException wrapping any failure during execution
 */
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException {
    DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
    // Unique instance name avoids client-instance collisions when several tools share a JVM.
    defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
    if (commandLine.hasOption('n')) {
        defaultMQAdminExt.setNamesrvAddr(commandLine.getOptionValue('n').trim());
    }
    try {
        defaultMQAdminExt.start();
        String group = commandLine.getOptionValue('g').trim();
        // Examine the connection through a specific broker (-b) if given, otherwise cluster-wide.
        ConsumerConnection cc = commandLine.hasOption('b')
            ? defaultMQAdminExt.examineConsumerConnectionInfo(group, commandLine.getOptionValue('b').trim())
            : defaultMQAdminExt.examineConsumerConnectionInfo(group);
        boolean jstack = commandLine.hasOption('s');
        if (!commandLine.hasOption('i')) {
            // No client id given: collect running info for every connected client.
            int i = 1;
            long now = System.currentTimeMillis();
            final TreeMap<String/* clientId */, ConsumerRunningInfo> criTable = new TreeMap<>();
            System.out.printf("%-10s %-40s %-20s %s%n",
                "#Index",
                "#ClientId",
                "#Version",
                "#ConsumerRunningInfoFile");
            for (Connection conn : cc.getConnectionSet()) {
                try {
                    ConsumerRunningInfo consumerRunningInfo =
                        defaultMQAdminExt.getConsumerRunningInfo(group, conn.getClientId(), jstack);
                    if (consumerRunningInfo != null) {
                        criTable.put(conn.getClientId(), consumerRunningInfo);
                        // Each client's dump goes to <timestamp>/<clientId>.
                        String filePath = now + "/" + conn.getClientId();
                        MixAll.string2FileNotSafe(consumerRunningInfo.formatString(), filePath);
                        System.out.printf("%-10d %-40s %-20s %s%n",
                            i++,
                            conn.getClientId(),
                            MQVersion.getVersionDesc(conn.getVersion()),
                            filePath);
                    }
                } catch (Exception e) {
                    // Best effort per client: keep collecting the remaining connections.
                    e.printStackTrace();
                }
            }
            if (!criTable.isEmpty()) {
                // Rebalance can only be meaningfully analyzed if all clients share the same subscription.
                boolean subSame = ConsumerRunningInfo.analyzeSubscription(criTable);
                boolean rebalanceOK = subSame && ConsumerRunningInfo.analyzeRebalance(criTable);
                if (subSame) {
                    System.out.printf("%n%nSame subscription in the same group of consumer");
                    System.out.printf("%n%nRebalance %s%n", rebalanceOK ? "OK" : "Failed");
                    Iterator<Entry<String, ConsumerRunningInfo>> it = criTable.entrySet().iterator();
                    while (it.hasNext()) {
                        Entry<String, ConsumerRunningInfo> next = it.next();
                        String result =
                            ConsumerRunningInfo.analyzeProcessQueue(next.getKey(), next.getValue());
                        if (result.length() > 0) {
                            System.out.printf("%s", result);
                        }
                    }
                } else {
                    System.out.printf("%n%nWARN: Different subscription in the same group of consumer!!!");
                }
            }
        } else {
            // Specific client id (-i): print that client's running info directly.
            String clientId = commandLine.getOptionValue('i').trim();
            ConsumerRunningInfo consumerRunningInfo =
                defaultMQAdminExt.getConsumerRunningInfo(group, clientId, jstack);
            if (consumerRunningInfo != null) {
                System.out.printf("%s", consumerRunningInfo.formatString());
            }
        }
    } catch (Exception e) {
        throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
    } finally {
        // Always release the admin client, even on failure.
        defaultMQAdminExt.shutdown();
    }
}
}
| ConsumerStatusSubCommand |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/SimplePropertyDescriptorTests.java | {
"start": 1986,
"end": 2379
} | class ____ {
@SuppressWarnings("unused")
public Object setFoo(int i, String foo) { return null; }
}
Method m = C.class.getMethod("setFoo", int.class, String.class);
Object pd = new ExtendedBeanInfo.SimpleIndexedPropertyDescriptor("foo", null, null, null, m);
assertThat(pd.toString()).contains(
"PropertyDescriptor[name=foo, propertyType=null",
"indexedPropertyType= | C |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java | {
"start": 3556,
"end": 33308
} | class ____ extends ESTestCase {
private ThreadPool threadPool;
@Override
public void setUp() throws Exception {
super.setUp();
threadPool = new TestThreadPool(getTestName());
}
@Override
public void tearDown() throws Exception {
terminate(threadPool);
super.tearDown();
}
public void testReplication() throws Exception {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState initialState = stateWithActivePrimary(index, true, randomInt(5));
IndexMetadata indexMetadata = initialState.getMetadata().getProject().index(index);
final long primaryTerm = indexMetadata.primaryTerm(0);
final IndexShardRoutingTable indexShardRoutingTable = initialState.getRoutingTable().shardRoutingTable(shardId);
ShardRouting primaryShard = indexShardRoutingTable.primaryShard();
if (primaryShard.relocating() && randomBoolean()) {
// simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated
initialState = ClusterState.builder(initialState)
.nodes(DiscoveryNodes.builder(initialState.nodes()).localNodeId(primaryShard.relocatingNodeId()))
.build();
primaryShard = primaryShard.getTargetRelocatingShard();
}
// add a few in-sync allocation ids that don't have corresponding routing entries
final Set<String> staleAllocationIds = Sets.newHashSet(generateRandomStringArray(4, 10, false));
final Set<String> inSyncAllocationIds = Sets.union(indexMetadata.inSyncAllocationIds(0), staleAllocationIds);
final Set<String> trackedShards = new HashSet<>();
final Set<String> untrackedShards = new HashSet<>();
addTrackingInfo(indexShardRoutingTable, primaryShard, trackedShards, untrackedShards);
trackedShards.addAll(staleAllocationIds);
final ReplicationGroup replicationGroup = new ReplicationGroup(indexShardRoutingTable, inSyncAllocationIds, trackedShards, 0);
final Set<ShardRouting> expectedReplicas = getExpectedReplicas(shardId, initialState, trackedShards);
final Map<ShardRouting, Exception> simulatedFailures = new HashMap<>();
final Map<ShardRouting, Exception> reportedFailures = new HashMap<>();
for (ShardRouting replica : expectedReplicas) {
if (randomBoolean()) {
Exception t;
boolean criticalFailure = randomBoolean();
if (criticalFailure) {
t = new CorruptIndexException("simulated", (String) null);
reportedFailures.put(replica, t);
} else {
t = new IndexShardNotStartedException(shardId, IndexShardState.RECOVERING);
}
logger.debug("--> simulating failure on {} with [{}]", replica, t.getClass().getSimpleName());
simulatedFailures.put(replica, t);
}
}
Request request = new Request(shardId);
PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
final TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures);
final TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool);
final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy, primaryTerm);
op.execute();
assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
assertThat(request.processedOnReplicas, equalTo(expectedReplicas));
assertThat(replicasProxy.failedReplicas, equalTo(simulatedFailures.keySet()));
assertThat(replicasProxy.markedAsStaleCopies, equalTo(staleAllocationIds));
assertThat("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get(), equalTo(true));
assertTrue("listener is not marked as done", listener.isDone());
ShardInfo shardInfo = listener.actionGet().getShardInfo();
assertThat(shardInfo.getFailed(), equalTo(reportedFailures.size()));
assertThat(shardInfo.getFailures(), arrayWithSize(reportedFailures.size()));
assertThat(shardInfo.getSuccessful(), equalTo(1 + expectedReplicas.size() - simulatedFailures.size()));
final List<ShardRouting> unassignedShards = indexShardRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED);
final int totalShards = 1 + expectedReplicas.size() + unassignedShards.size() + untrackedShards.size();
assertThat(replicationGroup.toString(), shardInfo.getTotal(), equalTo(totalShards));
assertThat(primary.knownLocalCheckpoints.remove(primaryShard.allocationId().getId()), equalTo(primary.localCheckpoint));
assertThat(primary.knownLocalCheckpoints, equalTo(replicasProxy.generatedLocalCheckpoints));
assertThat(primary.knownGlobalCheckpoints.remove(primaryShard.allocationId().getId()), equalTo(primary.globalCheckpoint));
assertThat(primary.knownGlobalCheckpoints, equalTo(replicasProxy.generatedGlobalCheckpoints));
}
public void testRetryTransientReplicationFailure() throws Exception {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState initialState = stateWithActivePrimary(index, true, randomInt(5));
IndexMetadata indexMetadata = initialState.getMetadata().getProject().index(index);
final long primaryTerm = indexMetadata.primaryTerm(0);
final IndexShardRoutingTable indexShardRoutingTable = initialState.getRoutingTable().shardRoutingTable(shardId);
ShardRouting primaryShard = indexShardRoutingTable.primaryShard();
if (primaryShard.relocating() && randomBoolean()) {
// simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated
initialState = ClusterState.builder(initialState)
.nodes(DiscoveryNodes.builder(initialState.nodes()).localNodeId(primaryShard.relocatingNodeId()))
.build();
primaryShard = primaryShard.getTargetRelocatingShard();
}
// add a few in-sync allocation ids that don't have corresponding routing entries
final Set<String> staleAllocationIds = Sets.newHashSet(generateRandomStringArray(4, 10, false));
final Set<String> inSyncAllocationIds = Sets.union(indexMetadata.inSyncAllocationIds(0), staleAllocationIds);
final Set<String> trackedShards = new HashSet<>();
final Set<String> untrackedShards = new HashSet<>();
addTrackingInfo(indexShardRoutingTable, primaryShard, trackedShards, untrackedShards);
trackedShards.addAll(staleAllocationIds);
final ReplicationGroup replicationGroup = new ReplicationGroup(indexShardRoutingTable, inSyncAllocationIds, trackedShards, 0);
final Set<ShardRouting> expectedReplicas = getExpectedReplicas(shardId, initialState, trackedShards);
final Map<ShardRouting, Exception> simulatedFailures = new HashMap<>();
for (ShardRouting replica : expectedReplicas) {
Exception cause;
Exception exception;
if (randomBoolean()) {
if (randomBoolean()) {
cause = new CircuitBreakingException("broken", CircuitBreaker.Durability.PERMANENT);
} else {
cause = new EsRejectedExecutionException("rejected");
}
exception = new RemoteTransportException("remote", cause);
} else {
TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 9300);
DiscoveryNode node = DiscoveryNodeUtils.create("replica", address);
cause = new ConnectTransportException(node, "broken");
exception = cause;
}
logger.debug("--> simulating failure on {} with [{}]", replica, exception.getClass().getSimpleName());
simulatedFailures.put(replica, exception);
}
Request request = new Request(shardId);
PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
final TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures, true);
final TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool);
final TestReplicationOperation op = new TestReplicationOperation(
request,
primary,
listener,
replicasProxy,
primaryTerm,
TimeValue.timeValueMillis(20),
TimeValue.timeValueSeconds(60)
);
op.execute();
assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
assertThat(request.processedOnReplicas, equalTo(expectedReplicas));
assertThat(replicasProxy.failedReplicas.size(), equalTo(0));
assertThat(replicasProxy.markedAsStaleCopies, equalTo(staleAllocationIds));
assertThat("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get(), equalTo(true));
ShardInfo shardInfo = listener.actionGet().getShardInfo();
assertThat(shardInfo.getSuccessful(), equalTo(1 + expectedReplicas.size()));
final List<ShardRouting> unassignedShards = indexShardRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED);
final int totalShards = 1 + expectedReplicas.size() + unassignedShards.size() + untrackedShards.size();
assertThat(replicationGroup.toString(), shardInfo.getTotal(), equalTo(totalShards));
assertThat(primary.knownLocalCheckpoints.remove(primaryShard.allocationId().getId()), equalTo(primary.localCheckpoint));
assertThat(primary.knownLocalCheckpoints, equalTo(replicasProxy.generatedLocalCheckpoints));
assertThat(primary.knownGlobalCheckpoints.remove(primaryShard.allocationId().getId()), equalTo(primary.globalCheckpoint));
assertThat(primary.knownGlobalCheckpoints, equalTo(replicasProxy.generatedGlobalCheckpoints));
}
private void addTrackingInfo(
IndexShardRoutingTable indexShardRoutingTable,
ShardRouting primaryShard,
Set<String> trackedShards,
Set<String> untrackedShards
) {
for (int copy = 0; copy < indexShardRoutingTable.size(); copy++) {
ShardRouting shr = indexShardRoutingTable.shard(copy);
if (shr.unassigned() == false) {
if (shr.initializing()) {
if (randomBoolean()) {
trackedShards.add(shr.allocationId().getId());
} else {
untrackedShards.add(shr.allocationId().getId());
}
} else {
trackedShards.add(shr.allocationId().getId());
if (shr.relocating()) {
if (primaryShard == shr.getTargetRelocatingShard() || randomBoolean()) {
trackedShards.add(shr.getTargetRelocatingShard().allocationId().getId());
} else {
untrackedShards.add(shr.getTargetRelocatingShard().allocationId().getId());
}
}
}
}
}
}
public void testNoLongerPrimary() throws Exception {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState initialState = stateWithActivePrimary(index, true, 1 + randomInt(2), randomInt(2));
IndexMetadata indexMetadata = initialState.getMetadata().getProject().index(index);
final long primaryTerm = indexMetadata.primaryTerm(0);
final IndexShardRoutingTable indexShardRoutingTable = initialState.getRoutingTable().shardRoutingTable(shardId);
ShardRouting primaryShard = indexShardRoutingTable.primaryShard();
if (primaryShard.relocating() && randomBoolean()) {
// simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated
initialState = ClusterState.builder(initialState)
.nodes(DiscoveryNodes.builder(initialState.nodes()).localNodeId(primaryShard.relocatingNodeId()))
.build();
primaryShard = primaryShard.getTargetRelocatingShard();
}
// add an in-sync allocation id that doesn't have a corresponding routing entry
final Set<String> staleAllocationIds = Sets.newHashSet(randomAlphaOfLength(10));
final Set<String> inSyncAllocationIds = Sets.union(indexMetadata.inSyncAllocationIds(0), staleAllocationIds);
final Set<String> trackedShards = new HashSet<>();
addTrackingInfo(indexShardRoutingTable, primaryShard, trackedShards, new HashSet<>());
trackedShards.addAll(staleAllocationIds);
final ReplicationGroup replicationGroup = new ReplicationGroup(indexShardRoutingTable, inSyncAllocationIds, trackedShards, 0);
final Set<ShardRouting> expectedReplicas = getExpectedReplicas(shardId, initialState, trackedShards);
final Map<ShardRouting, Exception> expectedFailures = new HashMap<>();
if (expectedReplicas.isEmpty()) {
return;
}
final ShardRouting failedReplica = randomFrom(new ArrayList<>(expectedReplicas));
expectedFailures.put(failedReplica, new CorruptIndexException("simulated", (String) null));
Request request = new Request(shardId);
PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
final boolean testPrimaryDemotedOnStaleShardCopies = randomBoolean();
final Exception shardActionFailure;
if (randomBoolean()) {
shardActionFailure = new NodeClosedException(DiscoveryNodeUtils.create("foo"));
} else if (randomBoolean()) {
DiscoveryNode node = DiscoveryNodeUtils.create("foo");
shardActionFailure = new SendRequestTransportException(
node,
ShardStateAction.SHARD_FAILED_ACTION_NAME,
new NodeClosedException(node)
);
} else {
shardActionFailure = new ShardStateAction.NoLongerPrimaryShardException(failedReplica.shardId(), "the king is dead");
}
final TestReplicaProxy replicasProxy = new TestReplicaProxy(expectedFailures) {
@Override
public void failShardIfNeeded(
ShardRouting replica,
long primaryTerm,
String message,
Exception exception,
ActionListener<Void> shardActionListener
) {
if (testPrimaryDemotedOnStaleShardCopies) {
super.failShardIfNeeded(replica, primaryTerm, message, exception, shardActionListener);
} else {
assertThat(replica, equalTo(failedReplica));
shardActionListener.onFailure(shardActionFailure);
}
}
@Override
public void markShardCopyAsStaleIfNeeded(
ShardId shardId,
String allocationId,
long primaryTerm,
ActionListener<Void> shardActionListener
) {
if (testPrimaryDemotedOnStaleShardCopies) {
shardActionListener.onFailure(shardActionFailure);
} else {
super.markShardCopyAsStaleIfNeeded(shardId, allocationId, primaryTerm, shardActionListener);
}
}
};
AtomicBoolean primaryFailed = new AtomicBoolean();
final TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool) {
@Override
public void failShard(String message, Exception exception) {
assertThat(exception, instanceOf(ShardStateAction.NoLongerPrimaryShardException.class));
assertTrue(primaryFailed.compareAndSet(false, true));
}
};
final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy, primaryTerm);
op.execute();
assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
assertThat(
expectThrows(
ExecutionException.class,
ReplicationOperation.RetryOnPrimaryException.class,
() -> listener.get(10, TimeUnit.SECONDS)
).getMessage(),
anyOf(containsString("demoted while failing replica shard"), containsString("shutting down while failing replica shard"))
);
if (shardActionFailure instanceof ShardStateAction.NoLongerPrimaryShardException) {
assertTrue(primaryFailed.get());
} else {
assertFalse(primaryFailed.get());
}
}
public void testAddedReplicaAfterPrimaryOperation() throws Exception {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
final ClusterState initialState = stateWithActivePrimary(index, true, 0);
Set<String> inSyncAllocationIds = initialState.metadata().getProject().index(index).inSyncAllocationIds(0);
IndexShardRoutingTable shardRoutingTable = initialState.getRoutingTable().shardRoutingTable(shardId);
Set<String> trackedShards = new HashSet<>();
addTrackingInfo(shardRoutingTable, null, trackedShards, new HashSet<>());
ReplicationGroup initialReplicationGroup = new ReplicationGroup(shardRoutingTable, inSyncAllocationIds, trackedShards, 0);
final ClusterState stateWithAddedReplicas;
if (randomBoolean()) {
stateWithAddedReplicas = state(
index,
true,
ShardRoutingState.STARTED,
randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED
);
} else {
stateWithAddedReplicas = state(index, true, ShardRoutingState.RELOCATING);
}
inSyncAllocationIds = stateWithAddedReplicas.metadata().getProject().index(index).inSyncAllocationIds(0);
shardRoutingTable = stateWithAddedReplicas.getRoutingTable().shardRoutingTable(shardId);
trackedShards = new HashSet<>();
addTrackingInfo(shardRoutingTable, null, trackedShards, new HashSet<>());
ReplicationGroup updatedReplicationGroup = new ReplicationGroup(shardRoutingTable, inSyncAllocationIds, trackedShards, 0);
final AtomicReference<ReplicationGroup> replicationGroup = new AtomicReference<>(initialReplicationGroup);
logger.debug("--> using initial replicationGroup:\n{}", replicationGroup.get());
final long primaryTerm = initialState.getMetadata().getProject().index(shardId.getIndexName()).primaryTerm(shardId.id());
final ShardRouting primaryShard = updatedReplicationGroup.getRoutingTable().primaryShard();
final TestPrimary primary = new TestPrimary(primaryShard, replicationGroup::get, threadPool) {
@Override
public void perform(Request request, ActionListener<Result> listener) {
super.perform(request, listener.map(result -> {
replicationGroup.set(updatedReplicationGroup);
logger.debug("--> state after primary operation:\n{}", replicationGroup.get());
return result;
}));
}
};
Request request = new Request(shardId);
PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, new TestReplicaProxy(), primaryTerm);
op.execute();
assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
Set<ShardRouting> expectedReplicas = getExpectedReplicas(shardId, stateWithAddedReplicas, trackedShards);
assertThat(request.processedOnReplicas, equalTo(expectedReplicas));
}
public void testWaitForActiveShards() throws Exception {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
final int assignedReplicas = randomInt(2);
final int unassignedReplicas = randomInt(2);
final int totalShards = 1 + assignedReplicas + unassignedReplicas;
final int activeShardCount = randomIntBetween(0, totalShards);
final boolean unpromotableReplicas = randomBoolean();
Request request = new Request(shardId).waitForActiveShards(
activeShardCount == totalShards ? ActiveShardCount.ALL : ActiveShardCount.from(activeShardCount)
);
// In the case of unpromotables, only the search/replica assigned shards are calculated as active shards. But in other cases, or
// when the wait is for ALL active shards, ReplicationOperation#checkActiveShardCount() takes into account the primary shard as
// well, and that is why we need to increment the assigned replicas by 1 when calculating the actual active shards.
final int actualActiveShards = assignedReplicas + ((unpromotableReplicas && request.waitForActiveShards() != ActiveShardCount.ALL)
? 0
: 1);
final boolean passesActiveShardCheck = activeShardCount <= actualActiveShards;
ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas];
for (int i = 0; i < assignedReplicas; i++) {
replicaStates[i] = randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING);
}
for (int i = assignedReplicas; i < replicaStates.length; i++) {
replicaStates[i] = ShardRoutingState.UNASSIGNED;
}
final ClusterState state = state(
index,
true,
ShardRoutingState.STARTED,
unpromotableReplicas ? ShardRouting.Role.INDEX_ONLY : ShardRouting.Role.DEFAULT,
Arrays.stream(replicaStates)
.map(
shardRoutingState -> new Tuple<>(
shardRoutingState,
unpromotableReplicas ? ShardRouting.Role.SEARCH_ONLY : ShardRouting.Role.DEFAULT
)
)
.toList()
);
logger.debug(
"using active shards [{}], assigned shards [{}], total shards [{}]. unpromotable [{}]. expecting op to [{}]. state: \n{}",
request.waitForActiveShards(),
1 + assignedReplicas,
1 + assignedReplicas + unassignedReplicas,
unpromotableReplicas,
passesActiveShardCheck ? "succeed" : "retry",
state
);
final long primaryTerm = state.metadata().getProject().index(index).primaryTerm(shardId.id());
final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(index).shard(shardId.id());
final Set<String> inSyncAllocationIds = state.metadata().getProject().index(index).inSyncAllocationIds(0);
Set<String> trackedShards = new HashSet<>();
addTrackingInfo(shardRoutingTable, null, trackedShards, new HashSet<>());
final ReplicationGroup initialReplicationGroup = new ReplicationGroup(shardRoutingTable, inSyncAllocationIds, trackedShards, 0);
PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
final ShardRouting primaryShard = shardRoutingTable.primaryShard();
final TestReplicationOperation op = new TestReplicationOperation(
request,
new TestPrimary(primaryShard, () -> initialReplicationGroup, threadPool),
listener,
new TestReplicaProxy(),
logger,
threadPool,
"test",
primaryTerm
);
if (passesActiveShardCheck) {
assertThat(op.checkActiveShardCount(), nullValue());
op.execute();
assertTrue("operations should have been performed, active shard count is met", request.processedOnPrimary.get());
} else {
assertThat(
op.checkActiveShardCount(),
equalTo(
"Not enough active copies to meet shard count of ["
+ request.waitForActiveShards()
+ "] (have "
+ actualActiveShards
+ ", needed "
+ activeShardCount
+ ")."
)
);
op.execute();
assertFalse("operations should not have been perform, active shard count is *NOT* met", request.processedOnPrimary.get());
assertListenerThrows("should throw exception to trigger retry", listener, UnavailableShardsException.class);
}
}
public void testPrimaryFailureHandlingReplicaResponse() throws Exception {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
final Request request = new Request(shardId);
final ClusterState state = stateWithActivePrimary(index, true, 1, 0);
final IndexMetadata indexMetadata = state.getMetadata().getProject().index(index);
final long primaryTerm = indexMetadata.primaryTerm(0);
final ShardRouting primaryRouting = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
final Set<String> inSyncAllocationIds = indexMetadata.inSyncAllocationIds(0);
final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(index).shard(shardId.id());
final Set<String> trackedShards = shardRoutingTable.getPromotableAllocationIds();
final ReplicationGroup initialReplicationGroup = new ReplicationGroup(shardRoutingTable, inSyncAllocationIds, trackedShards, 0);
final Thread testThread = Thread.currentThread();
final boolean fatal = randomBoolean();
final PlainActionFuture<Void> primaryFailedFuture = new PlainActionFuture<>();
final ReplicationOperation.Primary<Request, Request, TestPrimary.Result> primary = new TestPrimary(
primaryRouting,
() -> initialReplicationGroup,
threadPool
) {
@Override
public void failShard(String message, Exception exception) {
assertNotSame(testThread, Thread.currentThread());
assertThat(Thread.currentThread().getName(), containsString('[' + ThreadPool.Names.WRITE + ']'));
assertTrue(fatal);
primaryFailedFuture.onResponse(null);
}
@Override
public void updateLocalCheckpointForShard(String allocationId, long checkpoint) {
if (primaryRouting.allocationId().getId().equals(allocationId)) {
super.updateLocalCheckpointForShard(allocationId, checkpoint);
} else {
if (fatal) {
throw new NullPointerException();
} else {
throw new AlreadyClosedException("already closed");
}
}
}
};
final PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
final ReplicationOperation.Replicas<Request> replicas = new TestReplicaProxy(Collections.emptyMap());
TestReplicationOperation operation = new TestReplicationOperation(request, primary, listener, replicas, primaryTerm);
operation.execute();
if (fatal) {
primaryFailedFuture.get(10, TimeUnit.SECONDS);
}
final ShardInfo shardInfo = listener.actionGet().getShardInfo();
assertThat(shardInfo.getFailed(), equalTo(0));
assertThat(shardInfo.getFailures(), arrayWithSize(0));
assertThat(shardInfo.getSuccessful(), equalTo(1 + getExpectedReplicas(shardId, state, trackedShards).size()));
}
private Set<ShardRouting> getExpectedReplicas(ShardId shardId, ClusterState state, Set<String> trackedShards) {
Set<ShardRouting> expectedReplicas = new HashSet<>();
String localNodeId = state.nodes().getLocalNodeId();
if (state.routingTable().hasIndex(shardId.getIndexName())) {
final IndexShardRoutingTable indexShardRoutingTable = state.routingTable().shardRoutingTable(shardId);
for (int copy = 0; copy < indexShardRoutingTable.size(); copy++) {
ShardRouting shardRouting = indexShardRoutingTable.shard(copy);
if (shardRouting.unassigned()) {
continue;
}
if (localNodeId.equals(shardRouting.currentNodeId()) == false) {
if (trackedShards.contains(shardRouting.allocationId().getId())) {
expectedReplicas.add(shardRouting);
}
}
if (shardRouting.relocating() && localNodeId.equals(shardRouting.relocatingNodeId()) == false) {
if (trackedShards.contains(shardRouting.getTargetRelocatingShard().allocationId().getId())) {
expectedReplicas.add(shardRouting.getTargetRelocatingShard());
}
}
}
}
return expectedReplicas;
}
public static | ReplicationOperationTests |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java | {
"start": 1611,
"end": 7999
} | class ____ extends ESIntegTestCase {
private static final String INDEX_NAME = "test_index";
@Override
protected boolean addMockHttpTransport() {
return false; // enable http
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(LocalStateInferencePlugin.class, TextSimilarityTestPlugin.class);
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put("xpack.license.self_generated.type", "trial")
.build();
}
@Before
public void setup() throws IOException {
XContentBuilder builder = XContentFactory.jsonBuilder()
.startObject()
.startObject("properties")
.startObject("vector")
.field("type", "dense_vector")
.field("dims", 1)
.field("index", true)
.field("similarity", "l2_norm")
.startObject("index_options")
.field("type", "hnsw")
.endObject()
.endObject()
.startObject("text")
.field("type", "text")
.endObject()
.startObject("integer")
.field("type", "integer")
.endObject()
.startObject("topic")
.field("type", "keyword")
.endObject()
.endObject()
.endObject();
assertAcked(prepareCreate(INDEX_NAME).setMapping(builder));
ensureGreen(INDEX_NAME);
}
private void performSearch(SearchSourceBuilder source) throws IOException {
Request request = new Request("GET", INDEX_NAME + "/_search");
request.setJsonEntity(Strings.toString(source));
getRestClient().performRequest(request);
}
public void testTelemetryForRRFRetriever() throws IOException {
if (false == isRetrieverTelemetryEnabled()) {
return;
}
// search#1 - this will record 1 entry for "retriever" in `sections`, and 1 for "knn" under `retrievers`
{
performSearch(
new SearchSourceBuilder().retriever(new KnnRetrieverBuilder("vector", new float[] { 1.0f }, null, 10, 15, 10f, null, null))
);
}
// search#2 - this will record 1 entry for "retriever" in `sections`, 1 for "standard" under `retrievers`, and 1 for "range" under
// `queries`
{
performSearch(new SearchSourceBuilder().retriever(new StandardRetrieverBuilder(QueryBuilders.rangeQuery("integer").gte(2))));
}
// search#3 - this will record 1 entry for "retriever" in `sections`, and 1 for "standard" under `retrievers`, and 1 for "knn" under
// `queries`
{
performSearch(
new SearchSourceBuilder().retriever(
new StandardRetrieverBuilder(new KnnVectorQueryBuilder("vector", new float[] { 1.0f }, 10, 15, 10f, null, null))
)
);
}
// search#4 - this will record 1 entry for "retriever" in `sections`, and 1 for "standard" under `retrievers`, and 1 for "term"
// under `queries`
{
performSearch(new SearchSourceBuilder().retriever(new StandardRetrieverBuilder(QueryBuilders.termQuery("topic", "foo"))));
}
// search#5 - this will record 1 entry for "retriever" in `sections`, and 1 for "text_similarity_reranker" under `retrievers`, as
// well as
// 1 "standard" under `retrievers`, and eventually 1 for "match" under `queries`
{
performSearch(
new SearchSourceBuilder().retriever(
new TextSimilarityRankRetrieverBuilder(
new StandardRetrieverBuilder(QueryBuilders.matchQuery("text", "foo")),
"some_inference_id",
"some_inference_text",
"some_field",
10,
false,
null
)
)
);
}
// search#6 - this will record 1 entry for "knn" in `sections`
{
performSearch(
new SearchSourceBuilder().knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 1.0f }, 10, 15, 10f, null, null)))
);
}
// search#7 - this will record 1 entry for "query" in `sections`, and 1 for "match_all" under `queries`
{
performSearch(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()));
}
// cluster stats
{
SearchUsageStats stats = clusterAdmin().prepareClusterStats().get().getIndicesStats().getSearchUsageStats();
assertEquals(7, stats.getTotalSearchCount());
assertThat(stats.getSectionsUsage().size(), equalTo(3));
assertThat(stats.getSectionsUsage().get("retriever"), equalTo(5L));
assertThat(stats.getSectionsUsage().get("query"), equalTo(1L));
assertThat(stats.getSectionsUsage().get("knn"), equalTo(1L));
assertThat(stats.getRetrieversUsage().size(), equalTo(3));
assertThat(stats.getRetrieversUsage().get("standard"), equalTo(4L));
assertThat(stats.getRetrieversUsage().get("knn"), equalTo(1L));
assertThat(stats.getRetrieversUsage().get("text_similarity_reranker"), equalTo(1L));
assertThat(stats.getQueryUsage().size(), equalTo(5));
assertThat(stats.getQueryUsage().get("range"), equalTo(1L));
assertThat(stats.getQueryUsage().get("term"), equalTo(1L));
assertThat(stats.getQueryUsage().get("match"), equalTo(1L));
assertThat(stats.getQueryUsage().get("match_all"), equalTo(1L));
assertThat(stats.getQueryUsage().get("knn"), equalTo(1L));
}
}
private boolean isRetrieverTelemetryEnabled() throws IOException {
NodesCapabilitiesResponse res = clusterAdmin().nodesCapabilities(
new NodesCapabilitiesRequest().method(RestRequest.Method.GET).path("_cluster/stats").capabilities("retrievers-usage-stats")
).actionGet();
return res != null && res.isSupported().orElse(false);
}
}
| TextSimilarityRankRetrieverTelemetryTests |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/bzip2/BZip2Utils.java | {
"start": 1185,
"end": 2484
} | class ____ {
private BZip2Utils() {
}
/**
* Returns the start offsets of blocks that follow the first block in the
* BZip2 compressed file at the given path. The first offset corresponds to
* the first byte containing the BZip2 block marker of the second block. The
* i-th offset corresponds to the block marker of the (i + 1)-th block.
*/
public static List<Long> getNextBlockMarkerOffsets(
Path path, Configuration conf) throws IOException {
FileSystem fs = path.getFileSystem(conf);
try (InputStream fileIn = fs.open(path)) {
return getNextBlockMarkerOffsets(fileIn);
}
}
/**
* Returns the start offsets of blocks that follow the first block in the
* BZip2 compressed input stream. The first offset corresponds to
* the first byte containing the BZip2 block marker of the second block. The
* i-th offset corresponds to the block marker of the (i + 1)-th block.
*/
public static List<Long> getNextBlockMarkerOffsets(InputStream rawIn)
throws IOException {
try (CBZip2InputStream in = new CBZip2InputStream(rawIn, BYBLOCK)) {
ArrayList<Long> offsets = new ArrayList<>();
while (in.skipToNextBlockMarker()) {
offsets.add(in.getProcessedByteCount());
}
return offsets;
}
}
}
| BZip2Utils |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/endpoint/annotation/EndpointDiscovererTests.java | {
"start": 18665,
"end": 18838
} | class ____ {
}
@Import({ TestEndpoint.class, SpecializedTestEndpoint.class, ProxiedSpecializedTestEndpointConfiguration.class })
static | SubSpecializedEndpointsConfiguration |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/ImplicitParamsForCreatorTest.java | {
"start": 813,
"end": 1213
} | class ____ extends JacksonAnnotationIntrospector
{
@Override
public String findImplicitPropertyName(MapperConfig<?> config, AnnotatedMember param) {
if (param instanceof AnnotatedParameter ap) {
return "paramName"+ap.getIndex();
}
return super.findImplicitPropertyName(config, param);
}
}
static | MyParamIntrospector |
java | spring-projects__spring-boot | module/spring-boot-webmvc-test/src/test/java/org/springframework/boot/webmvc/test/autoconfigure/mockmvc/ExampleIdConverter.java | {
"start": 1033,
"end": 1210
} | class ____ implements Converter<String, ExampleId> {
@Override
public ExampleId convert(String source) {
return new ExampleId(UUID.fromString(source));
}
}
| ExampleIdConverter |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/context/properties/ConfigurationPropertiesReportEndpointTests.java | {
"start": 22722,
"end": 23183
} | class ____ {
private final String name;
private int counter;
@Autowired
AutowiredProperties(String name) {
this.name = name;
}
public String getName() {
return this.name;
}
public int getCounter() {
return this.counter;
}
public void setCounter(int counter) {
this.counter = counter;
}
}
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(ImmutableNestedProperties.class)
static | AutowiredProperties |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | {
"start": 138646,
"end": 146358
} | class ____ extends DanglingFileHandler {
private final Path destination;
DanglingFileRecoverer(Path destination) {
this.destination = destination;
}
@Override
void handleFile(FileMetadata file, FileMetadata tempFile)
throws IOException {
LOG.debug("Recovering {}", file.getKey());
// Move to the final destination
String finalDestinationKey =
pathToKey(new Path(destination, file.getKey()));
store.rename(tempFile.getKey(), finalDestinationKey);
if (!finalDestinationKey.equals(file.getKey())) {
// Delete the empty link file now that we've restored it.
store.delete(file.getKey());
}
}
}
/**
* Check if a path has colons in its name
*/
private boolean containsColon(Path p) {
return p.toUri().getPath().toString().contains(":");
}
/**
* Implements recover and delete (-move and -delete) behaviors for handling
* dangling files (blobs whose upload was interrupted).
*
* @param root
* The root path to check from.
* @param handler
* The handler that deals with dangling files.
*/
private void handleFilesWithDanglingTempData(Path root,
DanglingFileHandler handler) throws IOException {
// Calculate the cut-off for when to consider a blob to be dangling.
long cutoffForDangling = new Date().getTime()
- getConf().getInt(AZURE_TEMP_EXPIRY_PROPERTY_NAME,
AZURE_TEMP_EXPIRY_DEFAULT) * 1000;
// Go over all the blobs under the given root and look for blobs to
// recover.
FileMetadata[] listing = store.list(pathToKey(root), AZURE_LIST_ALL,
AZURE_UNBOUNDED_DEPTH);
for (FileMetadata file : listing) {
if (!file.isDirectory()) { // We don't recover directory blobs
// See if this blob has a link in it (meaning it's a place-holder
// blob for when the upload to the temp blob is complete).
String link = store.getLinkInFileMetadata(file.getKey());
if (link != null) {
// It has a link, see if the temp blob it is pointing to is
// existent and old enough to be considered dangling.
FileMetadata linkMetadata = store.retrieveMetadata(link);
if (linkMetadata != null
&& linkMetadata.getModificationTime() >= cutoffForDangling) {
// Found one!
handler.handleFile(file, linkMetadata);
}
}
}
}
}
/**
* Looks under the given root path for any blob that are left "dangling",
* meaning that they are place-holder blobs that we created while we upload
* the data to a temporary blob, but for some reason we crashed in the middle
* of the upload and left them there. If any are found, we move them to the
* destination given.
*
* @param root
* The root path to consider.
* @param destination
* The destination path to move any recovered files to.
* @throws IOException Thrown when fail to recover files.
*/
public void recoverFilesWithDanglingTempData(Path root, Path destination)
throws IOException {
LOG.debug("Recovering files with dangling temp data in {}", root);
handleFilesWithDanglingTempData(root,
new DanglingFileRecoverer(destination));
}
/**
* Looks under the given root path for any blob that are left "dangling",
* meaning that they are place-holder blobs that we created while we upload
* the data to a temporary blob, but for some reason we crashed in the middle
* of the upload and left them there. If any are found, we delete them.
*
* @param root
* The root path to consider.
* @throws IOException Thrown when fail to delete.
*/
public void deleteFilesWithDanglingTempData(Path root) throws IOException {
LOG.debug("Deleting files with dangling temp data in {}", root);
handleFilesWithDanglingTempData(root, new DanglingFileDeleter());
}
@Override
protected void finalize() throws Throwable {
LOG.debug("finalize() called.");
close();
super.finalize();
}
/**
* Encode the key with a random prefix for load balancing in Azure storage.
* Upload data to a random temporary file then do storage side renaming to
* recover the original key.
*
* @param aKey a key to be encoded.
* @return Encoded version of the original key.
*/
private static String encodeKey(String aKey) {
// Get the tail end of the key name.
//
String fileName = aKey.substring(aKey.lastIndexOf(Path.SEPARATOR) + 1,
aKey.length());
// Construct the randomized prefix of the file name. The prefix ensures the
// file always drops into the same folder but with a varying tail key name.
String filePrefix = AZURE_TEMP_FOLDER + Path.SEPARATOR
+ UUID.randomUUID().toString();
// Concatenate the randomized prefix with the tail of the key name.
String randomizedKey = filePrefix + fileName;
// Return to the caller with the randomized key.
return randomizedKey;
}
/*
* Helper method to retrieve owner information for a given path.
* The method returns empty string in case the file is not found or the metadata does not contain owner information
*/
@VisibleForTesting
public String getOwnerForPath(Path absolutePath) throws IOException {
String owner = "";
FileMetadata meta = null;
String key = pathToKey(absolutePath);
try {
meta = store.retrieveMetadata(key);
if (meta != null) {
owner = meta.getOwner();
LOG.debug("Retrieved '{}' as owner for path - {}", owner, absolutePath);
} else {
// meta will be null if file/folder doen not exist
LOG.debug("Cannot find file/folder - '{}'. Returning owner as empty string", absolutePath);
}
} catch(IOException ex) {
Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
boolean isfileNotFoundException = innerException instanceof StorageException
&& NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException);
// should not throw when the exception is related to blob/container/file/folder not found
if (!isfileNotFoundException) {
String errorMsg = "Could not retrieve owner information for path - " + absolutePath;
LOG.error(errorMsg);
throw new IOException(errorMsg, ex);
}
}
return owner;
}
/**
* Helper method to update the chownAllowedUsers in tests.
* @param chownAllowedUsers list of chown allowed users
*/
@VisibleForTesting
void updateChownAllowedUsers(List<String> chownAllowedUsers) {
this.chownAllowedUsers = chownAllowedUsers;
}
/**
* Helper method to update the chmodAllowedUsers in tests.
* @param chmodAllowedUsers list of chmod allowed users
*/
@VisibleForTesting
void updateChmodAllowedUsers(List<String> chmodAllowedUsers) {
this.chmodAllowedUsers = chmodAllowedUsers;
}
/**
* Helper method to update the daemonUsers in tests.
* @param daemonUsers list of daemon users
*/
@VisibleForTesting
void updateDaemonUsers(List<String> daemonUsers) {
this.daemonUsers = daemonUsers;
}
@Override
public boolean hasPathCapability(final Path path, final String capability)
throws IOException {
switch (validatePathCapabilityArgs(path, capability)) {
case CommonPathCapabilities.FS_PERMISSIONS:
return true;
// Append support is dynamic
case CommonPathCapabilities.FS_APPEND:
return appendSupportEnabled;
default:
return super.hasPathCapability(path, capability);
}
}
}
| DanglingFileRecoverer |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/source/nullvaluecheckstrategy/RockFestivalMapper.java | {
"start": 471,
"end": 823
} | class ____ {
public static final RockFestivalMapper INSTANCE = Mappers.getMapper( RockFestivalMapper.class );
@Mapping( target = "stage", source = "artistName" )
public abstract RockFestivalTarget map( RockFestivalSource in );
public Stage artistToStage( String name ) {
return Stage.forArtist( name );
}
}
| RockFestivalMapper |
java | square__retrofit | retrofit-adapters/java8/src/main/java/retrofit2/adapter/java8/Java8CallAdapterFactory.java | {
"start": 1249,
"end": 1373
} | class ____ {@link Retrofit} allows you to return {@link CompletableFuture} from
* service methods.
* <pre><code>
* | to |
java | alibaba__fastjson | src/test/java/com/alibaba/json/test/entity/case1/String_100_Entity.java | {
"start": 52,
"end": 14812
} | class ____ {
private String f0;
private String f1;
private String f2;
private String f3;
private String f4;
private String f5;
private String f6;
private String f7;
private String f8;
private String f9;
private String f10;
private String f11;
private String f12;
private String f13;
private String f14;
private String f15;
private String f16;
private String f17;
private String f18;
private String f19;
private String f20;
private String f21;
private String f22;
private String f23;
private String f24;
private String f25;
private String f26;
private String f27;
private String f28;
private String f29;
private String f30;
private String f31;
private String f32;
private String f33;
private String f34;
private String f35;
private String f36;
private String f37;
private String f38;
private String f39;
private String f40;
private String f41;
private String f42;
private String f43;
private String f44;
private String f45;
private String f46;
private String f47;
private String f48;
private String f49;
private String f50;
private String f51;
private String f52;
private String f53;
private String f54;
private String f55;
private String f56;
private String f57;
private String f58;
private String f59;
private String f60;
private String f61;
private String f62;
private String f63;
private String f64;
private String f65;
private String f66;
private String f67;
private String f68;
private String f69;
private String f70;
private String f71;
private String f72;
private String f73;
private String f74;
private String f75;
private String f76;
private String f77;
private String f78;
private String f79;
private String f80;
private String f81;
private String f82;
private String f83;
private String f84;
private String f85;
private String f86;
private String f87;
private String f88;
private String f89;
private String f90;
private String f91;
private String f92;
private String f93;
private String f94;
private String f95;
private String f96;
private String f97;
private String f98;
private String f99;
public String getF0() {
return f0;
}
public void setF0(String f0) {
this.f0 = f0;
}
public String getF1() {
return f1;
}
public void setF1(String f1) {
this.f1 = f1;
}
public String getF2() {
return f2;
}
public void setF2(String f2) {
this.f2 = f2;
}
public String getF3() {
return f3;
}
public void setF3(String f3) {
this.f3 = f3;
}
public String getF4() {
return f4;
}
public void setF4(String f4) {
this.f4 = f4;
}
public String getF5() {
return f5;
}
public void setF5(String f5) {
this.f5 = f5;
}
public String getF6() {
return f6;
}
public void setF6(String f6) {
this.f6 = f6;
}
public String getF7() {
return f7;
}
public void setF7(String f7) {
this.f7 = f7;
}
public String getF8() {
return f8;
}
public void setF8(String f8) {
this.f8 = f8;
}
public String getF9() {
return f9;
}
public void setF9(String f9) {
this.f9 = f9;
}
public String getF10() {
return f10;
}
public void setF10(String f10) {
this.f10 = f10;
}
public String getF11() {
return f11;
}
public void setF11(String f11) {
this.f11 = f11;
}
public String getF12() {
return f12;
}
public void setF12(String f12) {
this.f12 = f12;
}
public String getF13() {
return f13;
}
public void setF13(String f13) {
this.f13 = f13;
}
public String getF14() {
return f14;
}
public void setF14(String f14) {
this.f14 = f14;
}
public String getF15() {
return f15;
}
public void setF15(String f15) {
this.f15 = f15;
}
public String getF16() {
return f16;
}
public void setF16(String f16) {
this.f16 = f16;
}
public String getF17() {
return f17;
}
public void setF17(String f17) {
this.f17 = f17;
}
public String getF18() {
return f18;
}
public void setF18(String f18) {
this.f18 = f18;
}
public String getF19() {
return f19;
}
public void setF19(String f19) {
this.f19 = f19;
}
public String getF20() {
return f20;
}
public void setF20(String f20) {
this.f20 = f20;
}
public String getF21() {
return f21;
}
public void setF21(String f21) {
this.f21 = f21;
}
public String getF22() {
return f22;
}
public void setF22(String f22) {
this.f22 = f22;
}
public String getF23() {
return f23;
}
public void setF23(String f23) {
this.f23 = f23;
}
public String getF24() {
return f24;
}
public void setF24(String f24) {
this.f24 = f24;
}
public String getF25() {
return f25;
}
public void setF25(String f25) {
this.f25 = f25;
}
public String getF26() {
return f26;
}
public void setF26(String f26) {
this.f26 = f26;
}
public String getF27() {
return f27;
}
public void setF27(String f27) {
this.f27 = f27;
}
public String getF28() {
return f28;
}
public void setF28(String f28) {
this.f28 = f28;
}
public String getF29() {
return f29;
}
public void setF29(String f29) {
this.f29 = f29;
}
public String getF30() {
return f30;
}
public void setF30(String f30) {
this.f30 = f30;
}
public String getF31() {
return f31;
}
public void setF31(String f31) {
this.f31 = f31;
}
public String getF32() {
return f32;
}
public void setF32(String f32) {
this.f32 = f32;
}
public String getF33() {
return f33;
}
public void setF33(String f33) {
this.f33 = f33;
}
public String getF34() {
return f34;
}
public void setF34(String f34) {
this.f34 = f34;
}
public String getF35() {
return f35;
}
public void setF35(String f35) {
this.f35 = f35;
}
public String getF36() {
return f36;
}
public void setF36(String f36) {
this.f36 = f36;
}
public String getF37() {
return f37;
}
public void setF37(String f37) {
this.f37 = f37;
}
public String getF38() {
return f38;
}
public void setF38(String f38) {
this.f38 = f38;
}
public String getF39() {
return f39;
}
public void setF39(String f39) {
this.f39 = f39;
}
public String getF40() {
return f40;
}
public void setF40(String f40) {
this.f40 = f40;
}
public String getF41() {
return f41;
}
public void setF41(String f41) {
this.f41 = f41;
}
public String getF42() {
return f42;
}
public void setF42(String f42) {
this.f42 = f42;
}
public String getF43() {
return f43;
}
public void setF43(String f43) {
this.f43 = f43;
}
public String getF44() {
return f44;
}
public void setF44(String f44) {
this.f44 = f44;
}
public String getF45() {
return f45;
}
public void setF45(String f45) {
this.f45 = f45;
}
public String getF46() {
return f46;
}
public void setF46(String f46) {
this.f46 = f46;
}
public String getF47() {
return f47;
}
public void setF47(String f47) {
this.f47 = f47;
}
public String getF48() {
return f48;
}
public void setF48(String f48) {
this.f48 = f48;
}
public String getF49() {
return f49;
}
public void setF49(String f49) {
this.f49 = f49;
}
public String getF50() {
return f50;
}
public void setF50(String f50) {
this.f50 = f50;
}
public String getF51() {
return f51;
}
public void setF51(String f51) {
this.f51 = f51;
}
public String getF52() {
return f52;
}
public void setF52(String f52) {
this.f52 = f52;
}
public String getF53() {
return f53;
}
public void setF53(String f53) {
this.f53 = f53;
}
public String getF54() {
return f54;
}
public void setF54(String f54) {
this.f54 = f54;
}
public String getF55() {
return f55;
}
public void setF55(String f55) {
this.f55 = f55;
}
public String getF56() {
return f56;
}
public void setF56(String f56) {
this.f56 = f56;
}
public String getF57() {
return f57;
}
public void setF57(String f57) {
this.f57 = f57;
}
public String getF58() {
return f58;
}
public void setF58(String f58) {
this.f58 = f58;
}
public String getF59() {
return f59;
}
public void setF59(String f59) {
this.f59 = f59;
}
public String getF60() {
return f60;
}
public void setF60(String f60) {
this.f60 = f60;
}
public String getF61() {
return f61;
}
public void setF61(String f61) {
this.f61 = f61;
}
public String getF62() {
return f62;
}
public void setF62(String f62) {
this.f62 = f62;
}
public String getF63() {
return f63;
}
public void setF63(String f63) {
this.f63 = f63;
}
public String getF64() {
return f64;
}
public void setF64(String f64) {
this.f64 = f64;
}
public String getF65() {
return f65;
}
public void setF65(String f65) {
this.f65 = f65;
}
public String getF66() {
return f66;
}
public void setF66(String f66) {
this.f66 = f66;
}
public String getF67() {
return f67;
}
public void setF67(String f67) {
this.f67 = f67;
}
public String getF68() {
return f68;
}
public void setF68(String f68) {
this.f68 = f68;
}
public String getF69() {
return f69;
}
public void setF69(String f69) {
this.f69 = f69;
}
public String getF70() {
return f70;
}
public void setF70(String f70) {
this.f70 = f70;
}
public String getF71() {
return f71;
}
public void setF71(String f71) {
this.f71 = f71;
}
public String getF72() {
return f72;
}
public void setF72(String f72) {
this.f72 = f72;
}
public String getF73() {
return f73;
}
public void setF73(String f73) {
this.f73 = f73;
}
public String getF74() {
return f74;
}
public void setF74(String f74) {
this.f74 = f74;
}
public String getF75() {
return f75;
}
public void setF75(String f75) {
this.f75 = f75;
}
public String getF76() {
return f76;
}
public void setF76(String f76) {
this.f76 = f76;
}
public String getF77() {
return f77;
}
public void setF77(String f77) {
this.f77 = f77;
}
public String getF78() {
return f78;
}
public void setF78(String f78) {
this.f78 = f78;
}
public String getF79() {
return f79;
}
public void setF79(String f79) {
this.f79 = f79;
}
public String getF80() {
return f80;
}
public void setF80(String f80) {
this.f80 = f80;
}
public String getF81() {
return f81;
}
public void setF81(String f81) {
this.f81 = f81;
}
public String getF82() {
return f82;
}
public void setF82(String f82) {
this.f82 = f82;
}
public String getF83() {
return f83;
}
public void setF83(String f83) {
this.f83 = f83;
}
public String getF84() {
return f84;
}
public void setF84(String f84) {
this.f84 = f84;
}
public String getF85() {
return f85;
}
public void setF85(String f85) {
this.f85 = f85;
}
public String getF86() {
return f86;
}
public void setF86(String f86) {
this.f86 = f86;
}
public String getF87() {
return f87;
}
public void setF87(String f87) {
this.f87 = f87;
}
public String getF88() {
return f88;
}
public void setF88(String f88) {
this.f88 = f88;
}
public String getF89() {
return f89;
}
public void setF89(String f89) {
this.f89 = f89;
}
public String getF90() {
return f90;
}
public void setF90(String f90) {
this.f90 = f90;
}
public String getF91() {
return f91;
}
public void setF91(String f91) {
this.f91 = f91;
}
public String getF92() {
return f92;
}
public void setF92(String f92) {
this.f92 = f92;
}
public String getF93() {
return f93;
}
public void setF93(String f93) {
this.f93 = f93;
}
public String getF94() {
return f94;
}
public void setF94(String f94) {
this.f94 = f94;
}
public String getF95() {
return f95;
}
public void setF95(String f95) {
this.f95 = f95;
}
public String getF96() {
return f96;
}
public void setF96(String f96) {
this.f96 = f96;
}
public String getF97() {
return f97;
}
public void setF97(String f97) {
this.f97 = f97;
}
public String getF98() {
return f98;
}
public void setF98(String f98) {
this.f98 = f98;
}
public String getF99() {
return f99;
}
public void setF99(String f99) {
this.f99 = f99;
}
}
| String_100_Entity |
java | elastic__elasticsearch | x-pack/plugin/migrate/src/test/java/org/elasticsearch/system_indices/action/GetFeatureUpgradeStatusResponseTests.java | {
"start": 1174,
"end": 5516
} | class ____ extends AbstractWireSerializingTestCase<GetFeatureUpgradeStatusResponse> {
@Override
protected Writeable.Reader<GetFeatureUpgradeStatusResponse> instanceReader() {
return GetFeatureUpgradeStatusResponse::new;
}
@Override
protected GetFeatureUpgradeStatusResponse createTestInstance() {
return new GetFeatureUpgradeStatusResponse(
randomList(8, GetFeatureUpgradeStatusResponseTests::createFeatureStatus),
randomFrom(GetFeatureUpgradeStatusResponse.UpgradeStatus.values())
);
}
@Override
protected GetFeatureUpgradeStatusResponse mutateInstance(GetFeatureUpgradeStatusResponse instance) {
return new GetFeatureUpgradeStatusResponse(
randomList(
8,
() -> randomValueOtherThanMany(
instance.getFeatureUpgradeStatuses()::contains,
GetFeatureUpgradeStatusResponseTests::createFeatureStatus
)
),
randomValueOtherThan(instance.getUpgradeStatus(), () -> randomFrom(GetFeatureUpgradeStatusResponse.UpgradeStatus.values()))
);
}
/** If constructor is called with null for a list, we just use an empty list */
public void testConstructorHandlesNullLists() {
GetFeatureUpgradeStatusResponse response = new GetFeatureUpgradeStatusResponse(null, MIGRATION_NEEDED);
assertThat(response.getFeatureUpgradeStatuses(), notNullValue());
assertThat(response.getFeatureUpgradeStatuses(), equalTo(Collections.emptyList()));
}
public void testUpgradeStatusCominations() {
assertEquals(NO_MIGRATION_NEEDED, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(NO_MIGRATION_NEEDED, NO_MIGRATION_NEEDED));
assertEquals(MIGRATION_NEEDED, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(NO_MIGRATION_NEEDED, MIGRATION_NEEDED));
assertEquals(MIGRATION_NEEDED, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(MIGRATION_NEEDED, NO_MIGRATION_NEEDED));
assertEquals(MIGRATION_NEEDED, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(MIGRATION_NEEDED, MIGRATION_NEEDED));
assertEquals(IN_PROGRESS, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(IN_PROGRESS, NO_MIGRATION_NEEDED));
assertEquals(IN_PROGRESS, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(NO_MIGRATION_NEEDED, IN_PROGRESS));
assertEquals(IN_PROGRESS, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(MIGRATION_NEEDED, IN_PROGRESS));
assertEquals(IN_PROGRESS, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(IN_PROGRESS, MIGRATION_NEEDED));
assertEquals(IN_PROGRESS, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(IN_PROGRESS, IN_PROGRESS));
assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(ERROR, NO_MIGRATION_NEEDED));
assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(NO_MIGRATION_NEEDED, ERROR));
assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(MIGRATION_NEEDED, ERROR));
assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(ERROR, MIGRATION_NEEDED));
assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(IN_PROGRESS, ERROR));
assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(ERROR, IN_PROGRESS));
assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(ERROR, ERROR));
}
private static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus createFeatureStatus() {
return new GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus(
randomAlphaOfLengthBetween(3, 20),
randomFrom(IndexVersion.current(), IndexVersions.MINIMUM_COMPATIBLE),
randomFrom(GetFeatureUpgradeStatusResponse.UpgradeStatus.values()),
randomList(4, GetFeatureUpgradeStatusResponseTests::getIndexInfo)
);
}
private static GetFeatureUpgradeStatusResponse.IndexInfo getIndexInfo() {
return new GetFeatureUpgradeStatusResponse.IndexInfo(
randomAlphaOfLengthBetween(3, 20),
randomFrom(IndexVersion.current(), IndexVersions.MINIMUM_COMPATIBLE),
null
);
}
}
| GetFeatureUpgradeStatusResponseTests |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/config/LoggerConfig.java | {
"start": 40309,
"end": 40418
} | class ____ {
public Level level;
public List<AppenderRef> refs;
}
protected | LevelAndRefs |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/EnumUtils.java | {
"start": 16464,
"end": 16581
} | enum ____ comparison to {@code enumName}.
* @param defaultEnum the default enum.
* @return an enum, default | for |
java | apache__camel | components/camel-cxf/camel-cxf-rest/src/generated/java/org/apache/camel/component/cxf/jaxrs/CxfRsComponentConfigurer.java | {
"start": 736,
"end": 3766
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
CxfRsComponent target = (CxfRsComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "headerfilterstrategy":
case "headerFilterStrategy": target.setHeaderFilterStrategy(property(camelContext, org.apache.camel.spi.HeaderFilterStrategy.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "synchronous": target.setSynchronous(property(camelContext, boolean.class, value)); return true;
case "useglobalsslcontextparameters":
case "useGlobalSslContextParameters": target.setUseGlobalSslContextParameters(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "headerfilterstrategy":
case "headerFilterStrategy": return org.apache.camel.spi.HeaderFilterStrategy.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "synchronous": return boolean.class;
case "useglobalsslcontextparameters":
case "useGlobalSslContextParameters": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
CxfRsComponent target = (CxfRsComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "headerfilterstrategy":
case "headerFilterStrategy": return target.getHeaderFilterStrategy();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "synchronous": return target.isSynchronous();
case "useglobalsslcontextparameters":
case "useGlobalSslContextParameters": return target.isUseGlobalSslContextParameters();
default: return null;
}
}
}
| CxfRsComponentConfigurer |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/api/reactive/RedisScriptingReactiveCommands.java | {
"start": 1288,
"end": 6707
} | interface ____<K, V> {
/**
* Execute a Lua script server side.
*
* @param script Lua 5.1 script.
* @param type output type.
* @param keys key names.
* @param <T> expected return type.
* @return script result.
*/
<T> Flux<T> eval(String script, ScriptOutputType type, K... keys);
/**
* Execute a Lua script server side.
*
* @param script Lua 5.1 script.
* @param type output type.
* @param keys key names.
* @param <T> expected return type.
* @return script result.
* @since 6.0
*/
<T> Flux<T> eval(byte[] script, ScriptOutputType type, K... keys);
/**
* Execute a Lua script server side.
*
* @param script Lua 5.1 script.
* @param type the type.
* @param keys the keys.
* @param values the values.
* @param <T> expected return type.
* @return script result.
*/
<T> Flux<T> eval(String script, ScriptOutputType type, K[] keys, V... values);
/**
* Execute a Lua script server side.
*
* @param script Lua 5.1 script.
* @param type the type.
* @param keys the keys.
* @param values the values.
* @param <T> expected return type.
* @return script result.
* @since 6.0
*/
<T> Flux<T> eval(byte[] script, ScriptOutputType type, K[] keys, V... values);
/**
* This is a read-only variant of the EVAL command that cannot execute commands that modify data.
*
* @param script Lua 5.1 script.
* @param type the type.
* @param keys the keys.
* @param values the values.
* @param <T> expected return type.
* @return script result.
* @since 6.4
*/
<T> Flux<T> evalReadOnly(String script, ScriptOutputType type, K[] keys, V... values);
/**
* This is a read-only variant of the EVAL command that cannot execute commands that modify data.
*
* @param script Lua 5.1 script.
* @param type the type.
* @param keys the keys.
* @param values the values.
* @param <T> expected return type.
* @return script result.
* @since 6.2
*/
<T> Flux<T> evalReadOnly(byte[] script, ScriptOutputType type, K[] keys, V... values);
/**
* Evaluates a script cached on the server side by its SHA1 digest.
*
* @param digest SHA1 of the script.
* @param type the type.
* @param keys the keys.
* @param <T> expected return type.
* @return script result.
*/
<T> Flux<T> evalsha(String digest, ScriptOutputType type, K... keys);
/**
* Execute a Lua script server side.
*
* @param digest SHA1 of the script.
* @param type the type.
* @param keys the keys.
* @param values the values.
* @param <T> expected return type.
* @return script result.
*/
<T> Flux<T> evalsha(String digest, ScriptOutputType type, K[] keys, V... values);
/**
* This is a read-only variant of the EVALSHA command that cannot execute commands that modify data.
*
* @param digest SHA1 of the script.
* @param type the type.
* @param keys the keys.
* @param values the values.
* @param <T> expected return type.
* @return script result.
* @since 6.2
*/
<T> Flux<T> evalshaReadOnly(String digest, ScriptOutputType type, K[] keys, V... values);
/**
* Check existence of scripts in the script cache.
*
* @param digests script digests.
* @return Boolean array-reply The command returns an array of integers that correspond to the specified SHA1 digest
* arguments. For every corresponding SHA1 digest of a script that actually exists in the script cache, an 1 is
* returned, otherwise 0 is returned.
*/
Flux<Boolean> scriptExists(String... digests);
/**
* Remove all the scripts from the script cache.
*
* @return String simple-string-reply.
*/
Mono<String> scriptFlush();
/**
* Remove all the scripts from the script cache using the specified {@link FlushMode}.
*
* @param flushMode the flush mode (sync/async).
* @return String simple-string-reply.
* @since 6.1
*/
Mono<String> scriptFlush(FlushMode flushMode);
/**
* Kill the script currently in execution.
*
* @return String simple-string-reply.
*/
Mono<String> scriptKill();
/**
* Load the specified Lua script into the script cache.
*
* @param script script content.
* @return String bulk-string-reply This command returns the SHA1 digest of the script added into the script cache.
* @since 6.0
*/
Mono<String> scriptLoad(String script);
/**
* Load the specified Lua script into the script cache.
*
* @param script script content.
* @return String bulk-string-reply This command returns the SHA1 digest of the script added into the script cache.
* @since 6.0
*/
Mono<String> scriptLoad(byte[] script);
/**
* Create a SHA1 digest from a Lua script.
*
* @param script script content.
* @return the SHA1 value.
* @since 6.0
*/
String digest(String script);
/**
* Create a SHA1 digest from a Lua script.
*
* @param script script content.
* @return the SHA1 value.
* @since 6.0
*/
String digest(byte[] script);
}
| RedisScriptingReactiveCommands |
java | spring-projects__spring-boot | module/spring-boot-jdbc-test/src/test/java/org/springframework/boot/jdbc/test/autoconfigure/ExampleJdbcClientRepository.java | {
"start": 910,
"end": 1664
} | class ____ {
private static final ExampleEntityRowMapper ROW_MAPPER = new ExampleEntityRowMapper();
private final JdbcClient jdbcClient;
ExampleJdbcClientRepository(JdbcClient jdbcClient) {
this.jdbcClient = jdbcClient;
}
void save(ExampleEntity entity) {
this.jdbcClient.sql("insert into example (id, name) values (:id, :name)")
.param("id", entity.getId())
.param("name", entity.getName())
.update();
}
ExampleEntity findById(int id) {
return this.jdbcClient.sql("select id, name from example where id = :id")
.param("id", id)
.query(ROW_MAPPER)
.single();
}
Collection<ExampleEntity> findAll() {
return this.jdbcClient.sql("select id, name from example").query(ROW_MAPPER).list();
}
}
| ExampleJdbcClientRepository |
java | apache__camel | components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindyDoNotRemoveQuotesCsvUnmarshallTest.java | {
"start": 2633,
"end": 3025
} | class ____ extends RouteBuilder {
BindyCsvDataFormat camelDataFormat = new BindyCsvDataFormat(BindyDoNotRemoveQuotesCsvUnmarshallTest.Product.class);
@Override
public void configure() {
from(URI_DIRECT_START).unmarshal(camelDataFormat).to(URI_MOCK_RESULT);
}
}
@CsvRecord(separator = ",", removeQuotes = false)
public static | ContextConfig |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/pool/TestDruidOraclePreparedStatement.java | {
"start": 822,
"end": 3590
} | class ____ extends TestCase {
private String jdbcUrl;
private String user;
private String password;
private DruidDataSource dataSource;
public void setUp() throws Exception {
jdbcUrl = "jdbc:oracle:thin:@a.b.c.d:1521:ocnauto";
// jdbcUrl = "jdbc:oracle:thin:@a.b.c.d:1521:ocnauto"; // error url
user = "alibaba";
password = "ccbuauto";
dataSource = new DruidDataSource();
dataSource.setPoolPreparedStatements(true);
dataSource.setUrl(jdbcUrl);
dataSource.setUsername(user);
dataSource.setPassword(password);
}
protected void tearDown() throws Exception {
JdbcUtils.close(dataSource);
}
public void test_0() throws Exception {
Class.forName(JdbcUtils.getDriverClassName(jdbcUrl));
// {
// Connection conn = dataSource.getConnection();
//
// ResultSet metaRs = conn.getMetaData().getTables(null, "ALIBABA", null, new String[] { "TABLE" });
// JdbcUtils.printResultSet(metaRs);
// metaRs.close();
//
// conn.close();
// }
// {
// Connection conn = dataSource.getConnection();
// Statement stmt = conn.createStatement();
//
// ResultSet rs = stmt.executeQuery("SELECT * FROM WP_ORDERS");
// JdbcUtils.printResultSet(rs);
// rs.close();
//
// stmt.close();
// conn.close();
// }
for (int i = 0; i < 3; ++i) {
Connection conn = dataSource.getConnection();
// ResultSet metaRs = conn.getMetaData().getTables(null, "ALIBABA", null, new String[] {"TABLE"});
// JdbcUtils.printResultSet(metaRs);
// metaRs.close();
String sql = "SELECT * FROM WS_OFFER WHERE ROWNUM <= ?";
PreparedStatement stmt = conn.prepareStatement(sql);
stmt.setInt(1, 1);
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
// Process the result set
}
rs.close();
stmt.close();
conn.close();
}
{
Connection conn = dataSource.getConnection();
// ResultSet metaRs = conn.getMetaData().getTables(null, "ALIBABA", null, new String[] {"TABLE"});
// JdbcUtils.printResultSet(metaRs);
// metaRs.close();
String sql = "SELECT * FROM WS_OFFER WHERE ROWNUM <= ?";
PreparedStatement stmt = conn.prepareStatement(sql);
stmt.setInt(1, 11);
ResultSet rs = stmt.executeQuery();
rs.next();
rs.close();
stmt.close();
conn.close();
}
}
}
| TestDruidOraclePreparedStatement |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java | {
"start": 14241,
"end": 14629
} | class ____ extends PerShardCollector {
NonScoringPerShardCollector(ShardContext shardContext, Sort sort, int limit) {
// We don't use CollectorManager here as we don't retrieve the total hits and sort by score.
super(shardContext, new TopFieldCollectorManager(sort, limit, null, 0).newCollector());
}
}
static final | NonScoringPerShardCollector |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/functions/FunctionIdentifier.java | {
"start": 1524,
"end": 4901
} | class ____ implements Serializable {
private static final long serialVersionUID = 1L;
private final @Nullable ObjectIdentifier objectIdentifier;
private final @Nullable String functionName;
public static FunctionIdentifier of(ObjectIdentifier oi) {
return new FunctionIdentifier(oi);
}
public static FunctionIdentifier of(String functionName) {
return new FunctionIdentifier(functionName);
}
private FunctionIdentifier(ObjectIdentifier objectIdentifier) {
checkNotNull(objectIdentifier, "Object identifier cannot be null");
this.objectIdentifier = objectIdentifier;
this.functionName = null;
}
private FunctionIdentifier(String functionName) {
checkArgument(
!StringUtils.isNullOrWhitespaceOnly(functionName),
"function name cannot be null or empty string");
this.functionName = functionName;
this.objectIdentifier = null;
}
/** Normalize a function name. */
public static String normalizeName(String name) {
return name.toLowerCase();
}
/** Normalize an object identifier by only normalizing the function name. */
public static ObjectIdentifier normalizeObjectIdentifier(ObjectIdentifier oi) {
return ObjectIdentifier.of(
oi.getCatalogName(), oi.getDatabaseName(), normalizeName(oi.getObjectName()));
}
public Optional<ObjectIdentifier> getIdentifier() {
return Optional.ofNullable(objectIdentifier);
}
public Optional<String> getSimpleName() {
return Optional.ofNullable(functionName);
}
public String getFunctionName() {
if (objectIdentifier != null) {
return objectIdentifier.getObjectName();
} else {
return functionName;
}
}
/** List of the component names of this function identifier. */
public List<String> toList() {
if (objectIdentifier != null) {
return objectIdentifier.toList();
} else if (functionName != null) {
return Collections.singletonList(functionName);
} else {
throw new IllegalStateException(
"functionName and objectIdentifier are both null which should never happen.");
}
}
/** Returns a string that summarizes this instance for printing to a console or log. */
public String asSummaryString() {
if (objectIdentifier != null) {
return String.join(
".",
objectIdentifier.getCatalogName(),
objectIdentifier.getDatabaseName(),
objectIdentifier.getObjectName());
} else {
return functionName;
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
FunctionIdentifier that = (FunctionIdentifier) o;
return Objects.equals(objectIdentifier, that.objectIdentifier)
&& Objects.equals(functionName, that.functionName);
}
@Override
public int hashCode() {
return Objects.hash(objectIdentifier, functionName);
}
@Override
public String toString() {
return asSummaryString();
}
}
| FunctionIdentifier |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/core/namedparam/BeanPropertySqlParameterSource.java | {
"start": 1671,
"end": 3677
} | class ____ extends AbstractSqlParameterSource {
private final BeanWrapper beanWrapper;
private String @Nullable [] propertyNames;
/**
* Create a new BeanPropertySqlParameterSource for the given bean.
* @param object the bean instance to wrap
*/
public BeanPropertySqlParameterSource(Object object) {
this.beanWrapper = PropertyAccessorFactory.forBeanPropertyAccess(object);
}
@Override
public boolean hasValue(String paramName) {
return this.beanWrapper.isReadableProperty(paramName);
}
@Override
public @Nullable Object getValue(String paramName) throws IllegalArgumentException {
try {
return this.beanWrapper.getPropertyValue(paramName);
}
catch (NotReadablePropertyException ex) {
throw new IllegalArgumentException(ex.getMessage());
}
}
/**
* Derives a default SQL type from the corresponding property type.
* @see org.springframework.jdbc.core.StatementCreatorUtils#javaTypeToSqlParameterType
*/
@Override
public int getSqlType(String paramName) {
int sqlType = super.getSqlType(paramName);
if (sqlType != TYPE_UNKNOWN) {
return sqlType;
}
Class<?> propType = this.beanWrapper.getPropertyType(paramName);
return StatementCreatorUtils.javaTypeToSqlParameterType(propType);
}
@Override
public String[] getParameterNames() {
return getReadablePropertyNames();
}
/**
* Provide access to the property names of the wrapped bean.
* Uses support provided in the {@link PropertyAccessor} interface.
* @return an array containing all the known property names
*/
public String[] getReadablePropertyNames() {
if (this.propertyNames == null) {
List<String> names = new ArrayList<>();
PropertyDescriptor[] props = this.beanWrapper.getPropertyDescriptors();
for (PropertyDescriptor pd : props) {
if (this.beanWrapper.isReadableProperty(pd.getName())) {
names.add(pd.getName());
}
}
this.propertyNames = StringUtils.toStringArray(names);
}
return this.propertyNames;
}
}
| BeanPropertySqlParameterSource |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/GuardedByCheckerTest.java | {
"start": 33057,
"end": 33172
} | class ____ {
Lock lock;
@GuardedBy("lock")
int x;
static | Test |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java | {
"start": 19768,
"end": 19892
} | interface ____ throwing up "raw" exceptions, this handler
* gets the translated one.
*/
@FunctionalInterface
public | is |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/EnvironmentConverter.java | {
"start": 1396,
"end": 2159
} | class ____ {
private static final String CONFIGURABLE_WEB_ENVIRONMENT_CLASS = "org.springframework.web.context.ConfigurableWebEnvironment";
private static final Set<String> SERVLET_ENVIRONMENT_SOURCE_NAMES;
static {
Set<String> names = new HashSet<>();
names.add(StandardServletEnvironment.SERVLET_CONTEXT_PROPERTY_SOURCE_NAME);
names.add(StandardServletEnvironment.SERVLET_CONFIG_PROPERTY_SOURCE_NAME);
names.add(StandardServletEnvironment.JNDI_PROPERTY_SOURCE_NAME);
SERVLET_ENVIRONMENT_SOURCE_NAMES = Collections.unmodifiableSet(names);
}
private final ClassLoader classLoader;
/**
* Creates a new {@link EnvironmentConverter} that will use the given
* {@code classLoader} during conversion.
* @param classLoader the | EnvironmentConverter |
java | google__guava | android/guava/src/com/google/common/collect/LexicographicalOrdering.java | {
"start": 1035,
"end": 2468
} | class ____<T extends @Nullable Object> extends Ordering<Iterable<T>>
implements Serializable {
final Comparator<? super T> elementOrder;
LexicographicalOrdering(Comparator<? super T> elementOrder) {
this.elementOrder = elementOrder;
}
@Override
public int compare(Iterable<T> leftIterable, Iterable<T> rightIterable) {
Iterator<T> left = leftIterable.iterator();
Iterator<T> right = rightIterable.iterator();
while (left.hasNext()) {
if (!right.hasNext()) {
return LEFT_IS_GREATER; // because it's longer
}
int result = elementOrder.compare(left.next(), right.next());
if (result != 0) {
return result;
}
}
if (right.hasNext()) {
return RIGHT_IS_GREATER; // because it's longer
}
return 0;
}
@Override
public boolean equals(@Nullable Object object) {
if (object == this) {
return true;
}
if (object instanceof LexicographicalOrdering) {
LexicographicalOrdering<?> that = (LexicographicalOrdering<?>) object;
return this.elementOrder.equals(that.elementOrder);
}
return false;
}
@Override
public int hashCode() {
return elementOrder.hashCode() ^ 2075626741; // meaningless
}
@Override
public String toString() {
return elementOrder + ".lexicographical()";
}
@GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0;
}
| LexicographicalOrdering |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJsonSerialization.java | {
"start": 1710,
"end": 6266
} | class ____ implements Serializable {
private String name;
private String value;
KeyVal(String name, String value) {
this.name = name;
this.value = value;
}
KeyVal() {
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("SimpleJson{");
sb.append("name='").append(name).append('\'');
sb.append(", value='").append(value).append('\'');
sb.append('}');
return sb.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
KeyVal that = (KeyVal) o;
return Objects.equals(name, that.name) &&
Objects.equals(value, that.value);
}
@Override
public int hashCode() {
return Objects.hash(name, value);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
}
@Test
public void testStringRoundTrip() throws Throwable {
String wire = serDeser.toJson(source);
KeyVal unmarshalled = serDeser.fromJson(wire);
assertEquals(source, unmarshalled, "Failed to unmarshall: " + wire);
}
@Test
public void testBytesRoundTrip() throws Throwable {
byte[] wire = serDeser.toBytes(source);
KeyVal unmarshalled = serDeser.fromBytes(wire);
assertEquals(source, unmarshalled);
}
@Test
public void testBadBytesRoundTrip() throws Throwable {
LambdaTestUtils.intercept(JsonParseException.class,
"token",
() -> serDeser.fromBytes(new byte[]{'a'}));
}
@Test
public void testCloneViaJson() throws Throwable {
KeyVal unmarshalled = serDeser.fromInstance(source);
assertEquals(source, unmarshalled);
}
@Test
public void testFileRoundTrip() throws Throwable {
File tempFile = File.createTempFile("Keyval", ".json");
tempFile.delete();
try {
serDeser.save(tempFile, source);
assertEquals(source, serDeser.load(tempFile));
} finally {
tempFile.delete();
}
}
@Test
public void testEmptyFile() throws Throwable {
File tempFile = File.createTempFile("Keyval", ".json");
try {
LambdaTestUtils.intercept(EOFException.class,
"empty",
() -> serDeser.load(tempFile));
} finally {
tempFile.delete();
}
}
/**
* round trip through both load APIs.
*/
@Test
public void testFileSystemRoundTrip() throws Throwable {
File tempFile = File.createTempFile("Keyval", ".json");
tempFile.delete();
Path tempPath = new Path(tempFile.toURI());
LocalFileSystem fs = FileSystem.getLocal(new Configuration());
try {
serDeser.save(fs, tempPath, source, false);
assertEquals(source, serDeser.load(fs, tempPath),
"JSON loaded with load(fs, path)");
assertEquals(source, serDeser.load(fs, tempPath, fs.getFileStatus(tempPath)),
"JSON loaded with load(fs, path, status)");
} finally {
fs.delete(tempPath, false);
}
}
/**
* 0 byte file through the load(path) API will fail with a wrapped
* Parser exception.
* 0 byte file through the load(path, status) API will fail with a wrapped
* Parser exception.
*/
@Test
public void testFileSystemEmptyPath() throws Throwable {
File tempFile = File.createTempFile("Keyval", ".json");
Path tempPath = new Path(tempFile.toURI());
LocalFileSystem fs = FileSystem.getLocal(new Configuration());
try {
LambdaTestUtils.intercept(PathIOException.class,
() -> serDeser.load(fs, tempPath));
fs.delete(tempPath, false);
LambdaTestUtils.intercept(FileNotFoundException.class,
() -> serDeser.load(fs, tempPath));
} finally {
fs.delete(tempPath, false);
}
}
/**
* 0 byte file through the load(path, status) API will fail with an
* EOFException.
*/
@Test
public void testFileSystemEmptyStatus() throws Throwable {
File tempFile = File.createTempFile("Keyval", ".json");
Path tempPath = new Path(tempFile.toURI());
LocalFileSystem fs = FileSystem.getLocal(new Configuration());
try {
final FileStatus st = fs.getFileStatus(tempPath);
LambdaTestUtils.intercept(EOFException.class,
() -> serDeser.load(fs, tempPath, st));
} finally {
fs.delete(tempPath, false);
}
}
}
| KeyVal |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java | {
"start": 40879,
"end": 41491
} | class ____ {
@JsonProperty
private final UUID processId;
@JsonProperty
private final String newField;
// required by jackson -- do not remove, your IDE may be warning that this is unused but it's lying to you
public FutureStateDirectoryProcessFile() {
this.processId = null;
this.newField = null;
}
FutureStateDirectoryProcessFile(final UUID processId, final String newField) {
this.processId = processId;
this.newField = newField;
}
}
private static | FutureStateDirectoryProcessFile |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/ResolvableTypeTests.java | {
"start": 78970,
"end": 79006
} | enum ____ { VALUE }
static | SimpleEnum |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/select/OracleSelectTest20.java | {
"start": 937,
"end": 2641
} | class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = //
"BEGIN" +
" sys.dbms_stats.gather_table_stats(" +
" ownname => 'ESCROW',tabname => 'HT_TASK_TRADE_HISTORY',estimate_percent=>0.5, cascade => TRUE" +
" ,method_opt=>'FOR ALL COLUMNS SIZE 1',no_invalidate => false); " +
"END; ";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
statemen.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(0, visitor.getTables().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("\"DUAL\"")));
assertEquals(0, visitor.getColumns().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "*")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "YEAR")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "order_mode")));
}
}
| OracleSelectTest20 |
java | spring-projects__spring-security | config/src/integration-test/java/org/springframework/security/config/annotation/authentication/ldap/LdapAuthenticationProviderConfigurerTests.java | {
"start": 5842,
"end": 6242
} | class ____ {
@Autowired
void configure(AuthenticationManagerBuilder auth) throws Exception {
// @formatter:off
auth
.ldapAuthentication()
.groupSearchBase("ou=groups")
.groupSearchFilter("(member={0})")
.userDnPatterns("uid={0},ou=people")
.contextSource()
.port(0);
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static | LdapWithRandomPortConfig |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHErasureCodingStep.java | {
"start": 1197,
"end": 2046
} | class ____
implements ErasureCodingStep {
private ECBlock[] inputBlocks;
private ECBlock[] outputBlocks;
private static final int SUB_PACKET_SIZE = 2;
/**
* Constructor given input blocks and output blocks.
*
* @param inputBlocks inputBlocks.
* @param outputBlocks outputBlocks.
*/
public HHErasureCodingStep(ECBlock[] inputBlocks,
ECBlock[] outputBlocks) {
this.inputBlocks = inputBlocks;
this.outputBlocks = outputBlocks;
}
protected int getSubPacketSize() {
return SUB_PACKET_SIZE;
}
@Override
public ECBlock[] getInputBlocks() {
return inputBlocks;
}
@Override
public ECBlock[] getOutputBlocks() {
return outputBlocks;
}
@Override
public void finish() {
// TODO: Finalize encoder/decoder if necessary
}
}
| HHErasureCodingStep |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/serde/ResolvedCatalogModelJsonDeserializer.java | {
"start": 2615,
"end": 4067
} | class ____ extends StdDeserializer<ResolvedCatalogModel> {
private static final long serialVersionUID = 1L;
public ResolvedCatalogModelJsonDeserializer() {
super(ResolvedCatalogModel.class);
}
@Override
public ResolvedCatalogModel deserialize(JsonParser jsonParser, DeserializationContext ctx)
throws IOException {
ObjectNode jsonNode = jsonParser.readValueAsTree();
ObjectCodec codec = jsonParser.getCodec();
ResolvedSchema inputSchema =
ctx.readValue(
traverse(jsonNode.required(INPUT_SCHEMA), codec), ResolvedSchema.class);
ResolvedSchema outputSchema =
ctx.readValue(
traverse(jsonNode.required(OUTPUT_SCHEMA), codec), ResolvedSchema.class);
String comment = deserializeFieldOrNull(jsonNode, COMMENT, String.class, codec, ctx);
Map<String, String> options =
deserializeMapOrEmpty(jsonNode, OPTIONS, String.class, String.class, codec, ctx);
return new DefaultResolvedCatalogModel(
CatalogModel.of(
Schema.newBuilder().fromResolvedSchema(inputSchema).build(),
Schema.newBuilder().fromResolvedSchema(outputSchema).build(),
options,
comment),
inputSchema,
outputSchema);
}
}
| ResolvedCatalogModelJsonDeserializer |
java | alibaba__nacos | api/src/test/java/com/alibaba/nacos/client/lock/NacosLockService.java | {
"start": 1018,
"end": 1960
} | class ____ implements LockService {
public static final AtomicBoolean IS_THROW_EXCEPTION = new AtomicBoolean(false);
public NacosLockService(Properties properties) throws NacosException {
if (IS_THROW_EXCEPTION.get()) {
throw new NacosException(NacosException.CLIENT_INVALID_PARAM, "mock exception");
}
}
@Override
public Boolean lock(LockInstance instance) throws NacosException {
return null;
}
@Override
public Boolean unLock(LockInstance instance) throws NacosException {
return null;
}
@Override
public Boolean remoteTryLock(LockInstance instance) throws NacosException {
return null;
}
@Override
public Boolean remoteReleaseLock(LockInstance instance) throws NacosException {
return null;
}
@Override
public void shutdown() throws NacosException {
}
}
| NacosLockService |
java | apache__dubbo | dubbo-config/dubbo-config-api/src/test/java/org/apache/dubbo/config/integration/single/SingleRegistryCenterDubboProtocolIntegrationTest.java | {
"start": 2778,
"end": 2892
} | class ____ implement some methods as base for single registry center.
*/
@DisabledForJreRange(min = JRE.JAVA_16)
| will |
java | apache__flink | flink-core/src/test/java/org/apache/flink/core/io/SimpleVersionedSerializerTypeSerializerProxyTest.java | {
"start": 2132,
"end": 2912
} | class ____ implements SimpleVersionedSerializer<String> {
private static final int VERSION = 1;
@Override
public int getVersion() {
return VERSION;
}
@Override
public byte[] serialize(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
@Override
public String deserialize(int version, byte[] serialized) {
assertThat(version).isEqualTo(VERSION);
return new String(serialized, StandardCharsets.UTF_8);
}
@Override
public int hashCode() {
return 1;
}
@Override
public boolean equals(Object obj) {
return obj instanceof TestStringSerializer;
}
}
}
| TestStringSerializer |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ThreadJoinLoopTest.java | {
"start": 17763,
"end": 19850
} | class ____ {
public void basicCase(Thread thread) {
Uninterruptibles.joinUninterruptibly(thread);
}
public void emptyInterruptedFullException(Thread thread) {
Uninterruptibles.joinUninterruptibly(thread);
}
public void emptyException(Thread thread) {
Uninterruptibles.joinUninterruptibly(thread);
}
public void emptyCatchStatements(Thread thread) {
Uninterruptibles.joinUninterruptibly(thread);
}
public void whileLoop(Thread thread) {
Uninterruptibles.joinUninterruptibly(thread);
}
public void whileLoopCheck(Thread thread) {
Uninterruptibles.joinUninterruptibly(thread);
}
public void whileLoopVariable(Thread thread, boolean threadAlive) {
Uninterruptibles.joinUninterruptibly(thread);
}
public void basicLoopOtherStatements(Thread thread) {
while (7 == 7) {
System.out.println("test");
Uninterruptibles.joinUninterruptibly(thread);
}
}
public void breakStatement(Thread thread) {
Uninterruptibles.joinUninterruptibly(thread);
}
private void whileLoopBreak(Thread thread) {
Uninterruptibles.joinUninterruptibly(thread);
}
private void whileLoopThreadAlive(Thread thread) {
Uninterruptibles.joinUninterruptibly(thread);
}
public void multipleStatements(Thread thread, boolean isAlive) {
Uninterruptibles.joinUninterruptibly(thread);
}
private void arrayJoin(Thread[] threads) {
for (int i = 0; i < threads.length; i++) {
Uninterruptibles.joinUninterruptibly(threads[i]);
}
}
| ThreadJoinLoopPositiveCases |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/jdk/JDKValueInstantiators.java | {
"start": 6443,
"end": 6956
} | class ____
extends JDKValueInstantiator
{
static final LinkedHashMapInstantiator INSTANCE = new LinkedHashMapInstantiator();
public LinkedHashMapInstantiator() {
super(LinkedHashMap.class);
}
@Override
public Object createUsingDefault(DeserializationContext ctxt) {
return new LinkedHashMap<>();
}
}
// @since 2.17 [databind#4299] Instantiators for additional container classes
private static | LinkedHashMapInstantiator |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/date/DateParseTest9.java | {
"start": 387,
"end": 2696
} | class ____ extends TestCase {
private static Random random = new Random();
private TimeZone original = TimeZone.getDefault();
private String[] zoneIds = TimeZone.getAvailableIDs();
@Override
public void setUp() {
int index = random.nextInt(zoneIds.length);
TimeZone timeZone = TimeZone.getTimeZone(zoneIds[index]);
TimeZone.setDefault(timeZone);
JSON.defaultTimeZone = timeZone;
}
@Override
public void tearDown () {
TimeZone.setDefault(original);
JSON.defaultTimeZone = original;
}
public void test_date() throws Exception {
String text = "\"/Date(1242357713797+0800)/\"";
Date date = JSON.parseObject(text, Date.class);
assertEquals(date.getTime(), 1242357713797L);
assertEquals(JSONToken.LITERAL_INT, CalendarCodec.instance.getFastMatchToken());
text = "\"/Date(1242357713797+0545)/\"";
date = JSON.parseObject(text, Date.class);
assertEquals(date.getTime(), 1242357713797L);
assertEquals(JSONToken.LITERAL_INT, CalendarCodec.instance.getFastMatchToken());
}
public void test_error() throws Exception {
Exception error = null;
try {
JSON.parseObject("{\"date\":\"/Date(1242357713797A0800)/\"}", VO.class);
} catch (Exception ex) {
error = ex;
}
assertNotNull(error);
}
public void test_error_1() throws Exception {
Exception error = null;
try {
JSON.parseObject("{\"date\":\"/Date(1242357713797#0800)/\"}", VO.class);
} catch (Exception ex) {
error = ex;
}
assertNotNull(error);
}
public void test_dates_different_timeZones() {
Calendar cal = Calendar.getInstance();
Date now = cal.getTime();
VO vo = new VO();
vo.date = now;
String json = JSON.toJSONString(vo);
VO result = JSON.parseObject(json, VO.class);
assertEquals(vo.date, result.date);
// with iso-format
json = JSON.toJSONString(vo, SerializerFeature.UseISO8601DateFormat);
result = JSON.parseObject(json, VO.class);
assertEquals(JSON.toJSONString(vo.date), JSON.toJSONString(result.date));
}
public static | DateParseTest9 |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/domain/blog/Blog.java | {
"start": 721,
"end": 1615
} | class ____ {
private int id;
private String title;
private Author author;
private List<Post> posts;
public Blog() {
}
public Blog(int id, String title, Author author, List<Post> posts) {
this();
this.id = id;
this.title = title;
this.author = author;
this.posts = posts;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
public Author getAuthor() {
return author;
}
public void setAuthor(Author author) {
this.author = author;
}
public List<Post> getPosts() {
return posts;
}
public void setPosts(List<Post> posts) {
this.posts = posts;
}
@Override
public String toString() {
return "Blog: " + id + " : " + title + " (" + author + ")";
}
}
| Blog |
java | apache__camel | components/camel-zipfile/src/test/java/org/apache/camel/processor/aggregate/zipfile/AggregationStrategyWithPreservationTest.java | {
"start": 1584,
"end": 4288
} | class ____ extends CamelTestSupport {
private static final int EXPECTED_NO_FILES = 5;
private static final String TEST_DIR = "target/out_AggregationStrategyWithPreservationTest";
@BeforeEach
public void deleteTestDirs() {
deleteDirectory(TEST_DIR);
}
@Test
public void testSplitter() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:aggregateToZipEntry");
mock.expectedMessageCount(1);
MockEndpoint.assertIsSatisfied(context);
File[] files = new File(TEST_DIR).listFiles();
assertNotNull(files);
assertTrue(files.length > 0, "Should be a file in " + TEST_DIR + " directory");
File resultFile = files[0];
Set<String> expectedZipFiles = new HashSet<>(
Arrays.asList("another/hello.txt",
"other/greetings.txt",
"chiau.txt", "hi.txt", "hola.txt"));
ZipInputStream zin = new ZipInputStream(new FileInputStream(resultFile));
try {
int fileCount = 0;
for (ZipEntry ze = zin.getNextEntry(); ze != null; ze = zin.getNextEntry()) {
if (!ze.isDirectory()) {
assertTrue(expectedZipFiles.remove(ze.toString()), "Found unexpected entry " + ze + " in zipfile");
fileCount++;
}
}
assertEquals(AggregationStrategyWithPreservationTest.EXPECTED_NO_FILES, fileCount,
String.format("Zip file should contains %d files, got %d files",
AggregationStrategyWithPreservationTest.EXPECTED_NO_FILES, fileCount));
assertEquals(0, expectedZipFiles.size(),
"Should have found all of the zip files in the file. Remaining: " + expectedZipFiles);
} finally {
IOHelper.close(zin);
}
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// Unzip file and Split it according to FileEntry
from("file:src/test/resources/org/apache/camel/aggregate/zipfile/data?delay=1000&noop=true&recursive=true")
.aggregate(new ZipAggregationStrategy(true, true))
.constant(true)
.completionFromBatchConsumer()
.eagerCheckCompletion()
.to("file:" + TEST_DIR)
.to("mock:aggregateToZipEntry")
.log("Done processing zip file: ${header.CamelFileName}");
}
};
}
}
| AggregationStrategyWithPreservationTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java | {
"start": 76515,
"end": 85047
} | interface ____ show it.
jobEdge.setShipStrategyName(partitioner.toString());
jobEdge.setDownstreamSubtaskStateMapper(partitioner.getDownstreamSubtaskStateMapper());
jobEdge.setUpstreamSubtaskStateMapper(partitioner.getUpstreamSubtaskStateMapper());
if (LOG.isDebugEnabled()) {
LOG.debug(
"CONNECTED: {} - {} -> {}",
partitioner.getClass().getSimpleName(),
headOfChain,
downStreamVertexID);
}
return jobEdge.getSource();
}
private static boolean isPersistentIntermediateDataset(
ResultPartitionType resultPartitionType, StreamEdge edge) {
return resultPartitionType.isBlockingOrBlockingPersistentResultPartition()
&& edge.getIntermediateDatasetIdToProduce() != null;
}
private static void checkBufferTimeout(ResultPartitionType type, StreamEdge edge) {
long bufferTimeout = edge.getBufferTimeout();
if (!type.canBePipelinedConsumed()
&& bufferTimeout != ExecutionOptions.DISABLED_NETWORK_BUFFER_TIMEOUT) {
throw new UnsupportedOperationException(
"only canBePipelinedConsumed partition support buffer timeout "
+ bufferTimeout
+ " for src operator in edge "
+ edge
+ ". \nPlease either disable buffer timeout (via -1) or use the canBePipelinedConsumed partition.");
}
}
private static ResultPartitionType getResultPartitionType(
StreamEdge edge, JobVertexBuildContext jobVertexBuildContext) {
switch (edge.getExchangeMode()) {
case PIPELINED:
return ResultPartitionType.PIPELINED_BOUNDED;
case BATCH:
return ResultPartitionType.BLOCKING;
case HYBRID_FULL:
return ResultPartitionType.HYBRID_FULL;
case HYBRID_SELECTIVE:
return ResultPartitionType.HYBRID_SELECTIVE;
case UNDEFINED:
return determineUndefinedResultPartitionType(edge, jobVertexBuildContext);
default:
throw new UnsupportedOperationException(
"Data exchange mode " + edge.getExchangeMode() + " is not supported yet.");
}
}
public static ResultPartitionType determineUndefinedResultPartitionType(
StreamEdge edge, JobVertexBuildContext jobVertexBuildContext) {
StreamGraph streamGraph = jobVertexBuildContext.getStreamGraph();
Attribute sourceNodeAttribute =
streamGraph.getStreamNode(edge.getSourceId()).getAttribute();
if (sourceNodeAttribute.isNoOutputUntilEndOfInput()) {
edge.setBufferTimeout(ExecutionOptions.DISABLED_NETWORK_BUFFER_TIMEOUT);
return ResultPartitionType.BLOCKING;
}
StreamPartitioner<?> partitioner = edge.getPartitioner();
switch (streamGraph.getGlobalStreamExchangeMode()) {
case ALL_EDGES_BLOCKING:
return ResultPartitionType.BLOCKING;
case FORWARD_EDGES_PIPELINED:
if (partitioner instanceof ForwardPartitioner) {
return ResultPartitionType.PIPELINED_BOUNDED;
} else {
return ResultPartitionType.BLOCKING;
}
case POINTWISE_EDGES_PIPELINED:
if (partitioner.isPointwise()) {
return ResultPartitionType.PIPELINED_BOUNDED;
} else {
return ResultPartitionType.BLOCKING;
}
case ALL_EDGES_PIPELINED:
return ResultPartitionType.PIPELINED_BOUNDED;
case ALL_EDGES_PIPELINED_APPROXIMATE:
return ResultPartitionType.PIPELINED_APPROXIMATE;
case ALL_EDGES_HYBRID_FULL:
return ResultPartitionType.HYBRID_FULL;
case ALL_EDGES_HYBRID_SELECTIVE:
return ResultPartitionType.HYBRID_SELECTIVE;
default:
throw new RuntimeException(
"Unrecognized global data exchange mode "
+ streamGraph.getGlobalStreamExchangeMode());
}
}
public static boolean isChainable(StreamEdge edge, StreamGraph streamGraph) {
return isChainable(edge, streamGraph, false);
}
public static boolean isChainable(
StreamEdge edge, StreamGraph streamGraph, boolean allowChainWithDefaultParallelism) {
StreamNode downStreamVertex = streamGraph.getTargetVertex(edge);
return downStreamVertex.getInEdges().size() == 1
&& isChainableInput(edge, streamGraph, allowChainWithDefaultParallelism);
}
public static boolean isChainableSource(StreamNode streamNode, StreamGraph streamGraph) {
if (streamNode.getOperatorFactory() == null
|| !(streamNode.getOperatorFactory() instanceof SourceOperatorFactory)
|| streamNode.getOutEdges().size() != 1) {
return false;
}
final StreamEdge sourceOutEdge = streamNode.getOutEdges().get(0);
final StreamNode target = streamGraph.getStreamNode(sourceOutEdge.getTargetId());
final ChainingStrategy targetChainingStrategy =
Preconditions.checkNotNull(target.getOperatorFactory()).getChainingStrategy();
return targetChainingStrategy == ChainingStrategy.HEAD_WITH_SOURCES
&& isChainableInput(sourceOutEdge, streamGraph, false);
}
private static boolean isChainableInput(
StreamEdge edge, StreamGraph streamGraph, boolean allowChainWithDefaultParallelism) {
StreamNode upStreamVertex = streamGraph.getSourceVertex(edge);
StreamNode downStreamVertex = streamGraph.getTargetVertex(edge);
if (!(streamGraph.isChainingEnabled()
&& upStreamVertex.isSameSlotSharingGroup(downStreamVertex)
&& areOperatorsChainable(
upStreamVertex,
downStreamVertex,
streamGraph,
allowChainWithDefaultParallelism)
&& arePartitionerAndExchangeModeChainable(
edge.getPartitioner(), edge.getExchangeMode(), streamGraph.isDynamic()))) {
return false;
}
// check that we do not have a union operation, because unions currently only work
// through the network/byte-channel stack.
// we check that by testing that each "type" (which means input position) is used only once
for (StreamEdge inEdge : downStreamVertex.getInEdges()) {
if (inEdge != edge && inEdge.getTypeNumber() == edge.getTypeNumber()) {
return false;
}
}
return true;
}
@VisibleForTesting
static boolean arePartitionerAndExchangeModeChainable(
StreamPartitioner<?> partitioner,
StreamExchangeMode exchangeMode,
boolean isDynamicGraph) {
if (partitioner instanceof ForwardForConsecutiveHashPartitioner) {
checkState(isDynamicGraph);
return true;
} else if ((partitioner instanceof ForwardPartitioner)
&& exchangeMode != StreamExchangeMode.BATCH) {
return true;
} else {
return false;
}
}
@VisibleForTesting
static boolean areOperatorsChainable(
StreamNode upStreamVertex,
StreamNode downStreamVertex,
StreamGraph streamGraph,
boolean allowChainWithDefaultParallelism) {
StreamOperatorFactory<?> upStreamOperator = upStreamVertex.getOperatorFactory();
StreamOperatorFactory<?> downStreamOperator = downStreamVertex.getOperatorFactory();
if (downStreamOperator == null || upStreamOperator == null) {
return false;
}
// yielding operators cannot be chained to legacy sources
// unfortunately the information that vertices have been chained is not preserved at this
// point
if (downStreamOperator instanceof YieldingOperatorFactory
&& getHeadOperator(upStreamVertex, streamGraph).isLegacySource()) {
return false;
}
// we use switch/case here to make sure this is exhaustive if ever values are added to the
// ChainingStrategy | can |
java | spring-projects__spring-security | messaging/src/main/java/org/springframework/security/messaging/context/SecurityContextPropagationChannelInterceptor.java | {
"start": 2379,
"end": 6461
} | class ____ implements ExecutorChannelInterceptor {
private static final ThreadLocal<Stack<SecurityContext>> originalContext = new ThreadLocal<>();
private SecurityContextHolderStrategy securityContextHolderStrategy = SecurityContextHolder
.getContextHolderStrategy();
private SecurityContext empty = this.securityContextHolderStrategy.createEmptyContext();
private final String authenticationHeaderName;
private Authentication anonymous = new AnonymousAuthenticationToken("key", "anonymous",
AuthorityUtils.createAuthorityList("ROLE_ANONYMOUS"));
/**
* Create a new instance using the header of the name
* {@link SimpMessageHeaderAccessor#USER_HEADER}.
*/
public SecurityContextPropagationChannelInterceptor() {
this(SimpMessageHeaderAccessor.USER_HEADER);
}
/**
* Create a new instance that uses the specified header to populate the
* {@link Authentication}.
* @param authenticationHeaderName the header name to populate the
* {@link Authentication}. Cannot be null.
*/
public SecurityContextPropagationChannelInterceptor(String authenticationHeaderName) {
Assert.notNull(authenticationHeaderName, "authenticationHeaderName cannot be null");
this.authenticationHeaderName = authenticationHeaderName;
}
public void setSecurityContextHolderStrategy(SecurityContextHolderStrategy strategy) {
this.securityContextHolderStrategy = strategy;
this.empty = this.securityContextHolderStrategy.createEmptyContext();
}
/**
* Configure an Authentication used for anonymous authentication. Default is: <pre>
* new AnonymousAuthenticationToken("key", "anonymous",
* AuthorityUtils.createAuthorityList("ROLE_ANONYMOUS"));
* </pre>
* @param authentication the Authentication used for anonymous authentication. Cannot
* be null.
*/
public void setAnonymousAuthentication(Authentication authentication) {
Assert.notNull(authentication, "authentication cannot be null");
this.anonymous = authentication;
}
@Override
public Message<?> preSend(Message<?> message, MessageChannel channel) {
Authentication authentication = this.securityContextHolderStrategy.getContext().getAuthentication();
if (authentication == null) {
authentication = this.anonymous;
}
return MessageBuilder.fromMessage(message).setHeader(this.authenticationHeaderName, authentication).build();
}
@Override
public Message<?> beforeHandle(Message<?> message, MessageChannel channel, MessageHandler handler) {
return postReceive(message, channel);
}
@Override
public Message<?> postReceive(Message<?> message, MessageChannel channel) {
setup(message);
return message;
}
@Override
public void afterMessageHandled(Message<?> message, MessageChannel channel, MessageHandler handler,
@Nullable Exception ex) {
cleanup();
}
private void setup(Message<?> message) {
Authentication authentication = message.getHeaders().get(this.authenticationHeaderName, Authentication.class);
SecurityContext currentContext = this.securityContextHolderStrategy.getContext();
Stack<SecurityContext> contextStack = originalContext.get();
if (contextStack == null) {
contextStack = new Stack<>();
originalContext.set(contextStack);
}
contextStack.push(currentContext);
SecurityContext context = this.securityContextHolderStrategy.createEmptyContext();
context.setAuthentication(authentication);
this.securityContextHolderStrategy.setContext(context);
}
private void cleanup() {
Stack<SecurityContext> contextStack = originalContext.get();
if (contextStack == null || contextStack.isEmpty()) {
this.securityContextHolderStrategy.clearContext();
originalContext.remove();
return;
}
SecurityContext context = contextStack.pop();
try {
if (this.empty.equals(context)) {
this.securityContextHolderStrategy.clearContext();
originalContext.remove();
}
else {
this.securityContextHolderStrategy.setContext(context);
}
}
catch (Throwable ex) {
this.securityContextHolderStrategy.clearContext();
}
}
}
| SecurityContextPropagationChannelInterceptor |
java | apache__flink | flink-core/src/main/java/org/apache/flink/core/fs/SafetyNetCloseableRegistry.java | {
"start": 5681,
"end": 7282
} | class ____
extends PhantomReference<WrappingProxyCloseable<? extends Closeable>>
implements Closeable {
private final Closeable innerCloseable;
private final SafetyNetCloseableRegistry closeableRegistry;
private final String debugString;
PhantomDelegatingCloseableRef(
WrappingProxyCloseable<? extends Closeable> referent,
SafetyNetCloseableRegistry closeableRegistry,
ReferenceQueue<? super WrappingProxyCloseable<? extends Closeable>> q) {
super(referent, q);
this.innerCloseable =
Preconditions.checkNotNull(WrappingProxyUtil.stripProxy(referent));
this.closeableRegistry = Preconditions.checkNotNull(closeableRegistry);
this.debugString = referent.toString();
}
String getDebugString() {
return debugString;
}
@Override
public void close() throws IOException {
// Mark sure the inner closeable is still registered and thus unclosed to
// prevent duplicated and concurrent closing from registry closing. This could
// happen if registry is closing after this phantom reference was enqueued.
if (closeableRegistry.removeCloseableInternal(innerCloseable)) {
LOG.warn("Closing unclosed resource via safety-net: {}", getDebugString());
innerCloseable.close();
}
}
}
/** Reaper runnable collects and closes leaking resources. */
static | PhantomDelegatingCloseableRef |
java | apache__camel | components/camel-dns/src/main/java/org/apache/camel/component/dns/policy/DnsActivationPolicy.java | {
"start": 1320,
"end": 6205
} | class ____ extends RoutePolicySupport {
private static final transient Logger LOG = LoggerFactory.getLogger(DnsActivationPolicy.class);
private ExceptionHandler exceptionHandler;
private DnsActivation dnsActivation;
private long ttl;
private boolean stopRoutesOnException;
private Map<String, Route> routes = new ConcurrentHashMap<>();
private Timer timer;
public DnsActivationPolicy() {
dnsActivation = new DnsActivation();
}
@Override
public void onInit(Route route) {
LOG.debug("onInit {}", route.getId());
routes.put(route.getId(), route);
}
@Override
public void onRemove(Route route) {
LOG.debug("onRemove {}", route.getId());
// noop
}
@Override
public void onStart(Route route) {
LOG.debug("onStart {}", route.getId());
// noop
}
@Override
public void onStop(Route route) {
LOG.debug("onStop {}", route.getId());
// noop
}
@Override
public void onSuspend(Route route) {
LOG.debug("onSuspend {}", route.getId());
// noop
}
@Override
public void onResume(Route route) {
LOG.debug("onResume {}", route.getId());
// noop
}
@Override
public void onExchangeBegin(Route route, Exchange exchange) {
LOG.debug("onExchange start {}/{}", route.getId(), exchange.getExchangeId());
// noop
}
@Override
public void onExchangeDone(Route route, Exchange exchange) {
LOG.debug("onExchange end {}/{}", route.getId(), exchange.getExchangeId());
// noop
}
@Override
protected void doStart() throws Exception {
LOG.debug("doStart");
timer = new Timer();
timer.schedule(new DnsActivationTask(), 0, ttl);
}
@Override
protected void doStop() throws Exception {
LOG.debug("doStop");
if (timer != null) {
timer.cancel();
timer = null;
}
}
@Override
public ExceptionHandler getExceptionHandler() {
if (exceptionHandler == null) {
exceptionHandler = new LoggingExceptionHandler(null, getClass());
}
return exceptionHandler;
}
@Override
public void setExceptionHandler(ExceptionHandler exceptionHandler) {
this.exceptionHandler = exceptionHandler;
}
public void setHostname(String hostname) {
dnsActivation.setHostname(hostname);
}
public String getHostname() {
return dnsActivation.getHostname();
}
public void setResolvesTo(List<String> resolvesTo) {
dnsActivation.setResolvesTo(resolvesTo);
}
public void setResolvesTo(String resolvesTo) {
dnsActivation.setResolvesTo(resolvesTo);
}
public List<String> getResolvesTo() {
return dnsActivation.getResolvesTo();
}
public void setTtl(long ttl) {
this.ttl = ttl;
}
public void setTtl(String ttl) {
this.ttl = Long.parseLong(ttl);
}
public long getTtl() {
return ttl;
}
public void setStopRoutesOnException(String stopRoutesOnException) {
this.stopRoutesOnException = Boolean.parseBoolean(stopRoutesOnException);
}
private void startRouteImpl(Route route) throws Exception {
ServiceStatus routeStatus = controller(route).getRouteStatus(route.getId());
if (routeStatus == ServiceStatus.Stopped) {
LOG.info("Starting {}", route.getId());
startRoute(route);
} else if (routeStatus == ServiceStatus.Suspended) {
LOG.info("Resuming {}", route.getId());
startConsumer(route.getConsumer());
} else {
LOG.debug("Nothing to do {} is {}", route.getId(), routeStatus);
}
}
private void startRoutes() {
for (Map.Entry<String, Route> entry : routes.entrySet()) {
try {
startRouteImpl(entry.getValue());
} catch (Exception e) {
LOG.warn(entry.getKey(), e);
}
}
}
private void stopRouteImpl(Route route) throws Exception {
ServiceStatus routeStatus = controller(route).getRouteStatus(route.getId());
if (routeStatus == ServiceStatus.Started) {
LOG.info("Stopping {}", route.getId());
stopRoute(route);
} else {
LOG.debug("Nothing to do {} is {}", route.getId(), routeStatus);
}
}
private void stopRoutes() {
for (Map.Entry<String, Route> routeEntry : routes.entrySet()) {
try {
stopRouteImpl(routeEntry.getValue());
} catch (Exception e) {
LOG.warn(routeEntry.getKey(), e);
}
}
}
protected boolean isActive() throws Exception {
return dnsActivation.isActive();
}
| DnsActivationPolicy |
java | redisson__redisson | redisson/src/main/java/org/redisson/QueueTransferTask.java | {
"start": 1255,
"end": 4927
} | class ____ {
private final long startTime;
private final Timeout task;
public TimeoutTask(long startTime, Timeout task) {
super();
this.startTime = startTime;
this.task = task;
}
public long getStartTime() {
return startTime;
}
public Timeout getTask() {
return task;
}
}
private volatile int usage = 1;
private final AtomicReference<TimeoutTask> lastTimeout = new AtomicReference<TimeoutTask>();
private final ServiceManager serviceManager;
public QueueTransferTask(ServiceManager serviceManager) {
super();
this.serviceManager = serviceManager;
}
public void incUsage() {
usage++;
}
public int decUsage() {
usage--;
return usage;
}
private int messageListenerId;
private int statusListenerId;
public void start() {
RTopic schedulerTopic = getTopic();
statusListenerId = schedulerTopic.addListener(new BaseStatusListener() {
@Override
public void onSubscribe(String channel) {
pushTask();
}
});
messageListenerId = schedulerTopic.addListener(Long.class, new MessageListener<Long>() {
@Override
public void onMessage(CharSequence channel, Long startTime) {
scheduleTask(startTime);
}
});
}
public void stop() {
RTopic schedulerTopic = getTopic();
schedulerTopic.removeListener(messageListenerId, statusListenerId);
TimeoutTask oldTimeout = lastTimeout.get();
if (oldTimeout != null) {
oldTimeout.getTask().cancel();
}
}
private void scheduleTask(final Long startTime) {
if (usage == 0) {
return;
}
if (startTime == null) {
return;
}
TimeoutTask oldTimeout = lastTimeout.get();
if (oldTimeout != null) {
oldTimeout.getTask().cancel();
}
long delay = startTime - System.currentTimeMillis();
if (delay > 10) {
Timeout timeout = serviceManager.newTimeout(new TimerTask() {
@Override
public void run(Timeout timeout) throws Exception {
pushTask();
TimeoutTask currentTimeout = lastTimeout.get();
if (currentTimeout != null
&& currentTimeout.getTask() == timeout) {
lastTimeout.compareAndSet(currentTimeout, null);
}
}
}, delay, TimeUnit.MILLISECONDS);
lastTimeout.compareAndSet(oldTimeout, new TimeoutTask(startTime, timeout));
} else {
pushTask();
}
}
protected abstract RTopic getTopic();
protected abstract RFuture<Long> pushTaskAsync();
private void pushTask() {
if (usage == 0) {
return;
}
RFuture<Long> startTimeFuture = pushTaskAsync();
startTimeFuture.whenComplete((res, e) -> {
if (e != null) {
if (serviceManager.isShuttingDown(e)) {
return;
}
log.error(e.getMessage(), e);
scheduleTask(System.currentTimeMillis() + 5 * 1000L);
return;
}
if (res != null) {
scheduleTask(res);
}
});
}
}
| TimeoutTask |
java | google__dagger | javatests/artifacts/hilt-android/simple/deep-lib/src/main/java/dagger/hilt/android/deep/DeepLib.java | {
"start": 890,
"end": 1139
} | interface ____ {
DeepLib getDeepInstance();
}
@Inject
public DeepLib() {}
public static DeepLib getInstance(Object componentManager) {
return EntryPoints.get(componentManager, LibEntryPoint.class).getDeepInstance();
}
}
| LibEntryPoint |
java | spring-projects__spring-security | kerberos/kerberos-core/src/main/java/org/springframework/security/kerberos/authentication/KerberosAuthentication.java | {
"start": 696,
"end": 778
} | interface ____ {
JaasSubjectHolder getJaasSubjectHolder();
}
| KerberosAuthentication |
java | apache__logging-log4j2 | log4j-1.2-api/src/main/java/org/apache/log4j/layout/Log4j1SyslogLayout.java | {
"start": 2760,
"end": 8379
} | class ____<B extends Builder<B>> extends AbstractStringLayout.Builder<B>
implements org.apache.logging.log4j.core.util.Builder<Log4j1SyslogLayout> {
public Builder() {
setCharset(StandardCharsets.UTF_8);
}
@PluginBuilderAttribute
private Facility facility = Facility.USER;
@PluginBuilderAttribute
private boolean facilityPrinting;
@PluginBuilderAttribute
private boolean header;
@PluginElement("Layout")
private Layout<? extends Serializable> messageLayout;
@Override
public Log4j1SyslogLayout build() {
if (!isValid()) {
return null;
}
if (messageLayout != null && !(messageLayout instanceof StringLayout)) {
LOGGER.error("Log4j1SyslogLayout: the message layout must be a StringLayout.");
return null;
}
return new Log4j1SyslogLayout(
facility, facilityPrinting, header, (StringLayout) messageLayout, getCharset());
}
public Facility getFacility() {
return facility;
}
public boolean isFacilityPrinting() {
return facilityPrinting;
}
public boolean isHeader() {
return header;
}
public Layout<? extends Serializable> getMessageLayout() {
return messageLayout;
}
public B setFacility(final Facility facility) {
this.facility = facility;
return asBuilder();
}
public B setFacilityPrinting(final boolean facilityPrinting) {
this.facilityPrinting = facilityPrinting;
return asBuilder();
}
public B setHeader(final boolean header) {
this.header = header;
return asBuilder();
}
public B setMessageLayout(final Layout<? extends Serializable> messageLayout) {
this.messageLayout = messageLayout;
return asBuilder();
}
}
@PluginBuilderFactory
public static <B extends Builder<B>> B newBuilder() {
return new Builder<B>().asBuilder();
}
/**
* Host name used to identify messages from this appender.
*/
private static final String localHostname = NetUtils.getLocalHostname();
private final Facility facility;
private final boolean facilityPrinting;
private final boolean header;
private final StringLayout messageLayout;
/**
* Date format used if header = true.
*/
private static final String[] dateFormatOptions = {"MMM dd HH:mm:ss", null, "en"};
private final LogEventPatternConverter dateConverter = DatePatternConverter.newInstance(dateFormatOptions);
private Log4j1SyslogLayout(
final Facility facility,
final boolean facilityPrinting,
final boolean header,
final StringLayout messageLayout,
final Charset charset) {
super(charset);
this.facility = facility;
this.facilityPrinting = facilityPrinting;
this.header = header;
this.messageLayout = messageLayout;
}
/**
* Formats a {@link LogEvent} in conformance with the BSD Log record format.
*
* @param event The LogEvent
* @return the event formatted as a String.
*/
@Override
public String toSerializable(final LogEvent event) {
// The messageLayout also uses the thread-bound StringBuilder,
// so we generate the message first
final String message = messageLayout != null
? messageLayout.toSerializable(event)
: event.getMessage().getFormattedMessage();
final StringBuilder buf = getStringBuilder();
buf.append('<');
buf.append(Priority.getPriority(facility, event.getLevel()));
buf.append('>');
if (header) {
final int index = buf.length() + 4;
dateConverter.format(event, buf);
// RFC 3164 says leading space, not leading zero on days 1-9
if (buf.charAt(index) == '0') {
buf.setCharAt(index, Chars.SPACE);
}
buf.append(Chars.SPACE);
buf.append(localHostname);
buf.append(Chars.SPACE);
}
if (facilityPrinting) {
buf.append(facility != null ? toRootLowerCase(facility.name()) : "user")
.append(':');
}
buf.append(message);
// TODO: splitting message into 1024 byte chunks?
return buf.toString();
}
/**
* Gets this SyslogLayout's content format. Specified by:
* <ul>
* <li>Key: "structured" Value: "false"</li>
* <li>Key: "dateFormat" Value: "MMM dd HH:mm:ss"</li>
* <li>Key: "format" Value: "<LEVEL>TIMESTAMP PROP(HOSTNAME) MESSAGE"</li>
* <li>Key: "formatType" Value: "logfilepatternreceiver" (format uses the keywords supported by
* LogFilePatternReceiver)</li>
* </ul>
*
* @return Map of content format keys supporting SyslogLayout
*/
@Override
public Map<String, String> getContentFormat() {
final Map<String, String> result = new HashMap<>();
result.put("structured", "false");
result.put("formatType", "logfilepatternreceiver");
result.put("dateFormat", dateFormatOptions[0]);
if (header) {
result.put("format", "<LEVEL>TIMESTAMP PROP(HOSTNAME) MESSAGE");
} else {
result.put("format", "<LEVEL>MESSAGE");
}
return result;
}
}
| Builder |
java | netty__netty | codec-classes-quic/src/main/java/io/netty/handler/codec/quic/BoringSSLCertificateVerifyCallbackTask.java | {
"start": 775,
"end": 1480
} | class ____ extends BoringSSLTask {
private final byte[][] x509;
private final String authAlgorithm;
private final BoringSSLCertificateVerifyCallback verifier;
BoringSSLCertificateVerifyCallbackTask(long ssl, byte[][] x509, String authAlgorithm,
BoringSSLCertificateVerifyCallback verifier) {
super(ssl);
this.x509 = x509;
this.authAlgorithm = authAlgorithm;
this.verifier = verifier;
}
@Override
protected void runTask(long ssl, TaskCallback callback) {
int result = verifier.verify(ssl, x509, authAlgorithm);
callback.onResult(ssl, result);
}
}
| BoringSSLCertificateVerifyCallbackTask |
java | apache__kafka | server/src/main/java/org/apache/kafka/network/RequestConvertToJson.java | {
"start": 27802,
"end": 60424
} | class ____ {
public static JsonNode request(AbstractRequest request) {
return switch (request.apiKey()) {
case ADD_OFFSETS_TO_TXN ->
AddOffsetsToTxnRequestDataJsonConverter.write(((AddOffsetsToTxnRequest) request).data(), request.version());
case ADD_PARTITIONS_TO_TXN ->
AddPartitionsToTxnRequestDataJsonConverter.write(((AddPartitionsToTxnRequest) request).data(), request.version());
case ADD_RAFT_VOTER ->
AddRaftVoterRequestDataJsonConverter.write(((AddRaftVoterRequest) request).data(), request.version());
case ALLOCATE_PRODUCER_IDS ->
AllocateProducerIdsRequestDataJsonConverter.write(((AllocateProducerIdsRequest) request).data(), request.version());
case ALTER_CLIENT_QUOTAS ->
AlterClientQuotasRequestDataJsonConverter.write(((AlterClientQuotasRequest) request).data(), request.version());
case ALTER_CONFIGS ->
AlterConfigsRequestDataJsonConverter.write(((AlterConfigsRequest) request).data(), request.version());
case ALTER_PARTITION_REASSIGNMENTS ->
AlterPartitionReassignmentsRequestDataJsonConverter.write(((AlterPartitionReassignmentsRequest) request).data(), request.version());
case ALTER_PARTITION ->
AlterPartitionRequestDataJsonConverter.write(((AlterPartitionRequest) request).data(), request.version());
case ALTER_REPLICA_LOG_DIRS ->
AlterReplicaLogDirsRequestDataJsonConverter.write(((AlterReplicaLogDirsRequest) request).data(), request.version());
case ALTER_SHARE_GROUP_OFFSETS ->
AlterShareGroupOffsetsRequestDataJsonConverter.write(((AlterShareGroupOffsetsRequest) request).data(), request.version());
case ALTER_USER_SCRAM_CREDENTIALS ->
AlterUserScramCredentialsRequestDataJsonConverter.write(((AlterUserScramCredentialsRequest) request).data(), request.version());
case API_VERSIONS ->
ApiVersionsRequestDataJsonConverter.write(((ApiVersionsRequest) request).data(), request.version());
case ASSIGN_REPLICAS_TO_DIRS ->
AssignReplicasToDirsRequestDataJsonConverter.write(((AssignReplicasToDirsRequest) request).data(), request.version());
case BEGIN_QUORUM_EPOCH ->
BeginQuorumEpochRequestDataJsonConverter.write(((BeginQuorumEpochRequest) request).data(), request.version());
case BROKER_HEARTBEAT ->
BrokerHeartbeatRequestDataJsonConverter.write(((BrokerHeartbeatRequest) request).data(), request.version());
case BROKER_REGISTRATION ->
BrokerRegistrationRequestDataJsonConverter.write(((BrokerRegistrationRequest) request).data(), request.version());
case CONSUMER_GROUP_DESCRIBE ->
ConsumerGroupDescribeRequestDataJsonConverter.write(((ConsumerGroupDescribeRequest) request).data(), request.version());
case CONSUMER_GROUP_HEARTBEAT ->
ConsumerGroupHeartbeatRequestDataJsonConverter.write(((ConsumerGroupHeartbeatRequest) request).data(), request.version());
case CONTROLLER_REGISTRATION ->
ControllerRegistrationRequestDataJsonConverter.write(((ControllerRegistrationRequest) request).data(), request.version());
case CREATE_ACLS ->
CreateAclsRequestDataJsonConverter.write(((CreateAclsRequest) request).data(), request.version());
case CREATE_DELEGATION_TOKEN ->
CreateDelegationTokenRequestDataJsonConverter.write(((CreateDelegationTokenRequest) request).data(), request.version());
case CREATE_PARTITIONS ->
CreatePartitionsRequestDataJsonConverter.write(((CreatePartitionsRequest) request).data(), request.version());
case CREATE_TOPICS ->
CreateTopicsRequestDataJsonConverter.write(((CreateTopicsRequest) request).data(), request.version());
case DELETE_ACLS ->
DeleteAclsRequestDataJsonConverter.write(((DeleteAclsRequest) request).data(), request.version());
case DELETE_GROUPS ->
DeleteGroupsRequestDataJsonConverter.write(((DeleteGroupsRequest) request).data(), request.version());
case DELETE_RECORDS ->
DeleteRecordsRequestDataJsonConverter.write(((DeleteRecordsRequest) request).data(), request.version());
case DELETE_SHARE_GROUP_OFFSETS ->
DeleteShareGroupOffsetsRequestDataJsonConverter.write(((DeleteShareGroupOffsetsRequest) request).data(), request.version());
case DELETE_SHARE_GROUP_STATE ->
DeleteShareGroupStateRequestDataJsonConverter.write(((DeleteShareGroupStateRequest) request).data(), request.version());
case DELETE_TOPICS ->
DeleteTopicsRequestDataJsonConverter.write(((DeleteTopicsRequest) request).data(), request.version());
case DESCRIBE_ACLS ->
DescribeAclsRequestDataJsonConverter.write(((DescribeAclsRequest) request).data(), request.version());
case DESCRIBE_CLIENT_QUOTAS ->
DescribeClientQuotasRequestDataJsonConverter.write(((DescribeClientQuotasRequest) request).data(), request.version());
case DESCRIBE_CLUSTER ->
DescribeClusterRequestDataJsonConverter.write(((DescribeClusterRequest) request).data(), request.version());
case DESCRIBE_CONFIGS ->
DescribeConfigsRequestDataJsonConverter.write(((DescribeConfigsRequest) request).data(), request.version());
case DESCRIBE_DELEGATION_TOKEN ->
DescribeDelegationTokenRequestDataJsonConverter.write(((DescribeDelegationTokenRequest) request).data(), request.version());
case DESCRIBE_GROUPS ->
DescribeGroupsRequestDataJsonConverter.write(((DescribeGroupsRequest) request).data(), request.version());
case DESCRIBE_LOG_DIRS ->
DescribeLogDirsRequestDataJsonConverter.write(((DescribeLogDirsRequest) request).data(), request.version());
case DESCRIBE_PRODUCERS ->
DescribeProducersRequestDataJsonConverter.write(((DescribeProducersRequest) request).data(), request.version());
case DESCRIBE_QUORUM ->
DescribeQuorumRequestDataJsonConverter.write(((DescribeQuorumRequest) request).data(), request.version());
case DESCRIBE_SHARE_GROUP_OFFSETS ->
DescribeShareGroupOffsetsRequestDataJsonConverter.write(((DescribeShareGroupOffsetsRequest) request).data(), request.version());
case DESCRIBE_TOPIC_PARTITIONS ->
DescribeTopicPartitionsRequestDataJsonConverter.write(((DescribeTopicPartitionsRequest) request).data(), request.version());
case DESCRIBE_TRANSACTIONS ->
DescribeTransactionsRequestDataJsonConverter.write(((DescribeTransactionsRequest) request).data(), request.version());
case DESCRIBE_USER_SCRAM_CREDENTIALS ->
DescribeUserScramCredentialsRequestDataJsonConverter.write(((DescribeUserScramCredentialsRequest) request).data(), request.version());
case ELECT_LEADERS ->
ElectLeadersRequestDataJsonConverter.write(((ElectLeadersRequest) request).data(), request.version());
case END_QUORUM_EPOCH ->
EndQuorumEpochRequestDataJsonConverter.write(((EndQuorumEpochRequest) request).data(), request.version());
case END_TXN -> EndTxnRequestDataJsonConverter.write(((EndTxnRequest) request).data(), request.version());
case ENVELOPE ->
EnvelopeRequestDataJsonConverter.write(((EnvelopeRequest) request).data(), request.version());
case EXPIRE_DELEGATION_TOKEN ->
ExpireDelegationTokenRequestDataJsonConverter.write(((ExpireDelegationTokenRequest) request).data(), request.version());
case FETCH -> FetchRequestDataJsonConverter.write(((FetchRequest) request).data(), request.version());
case FETCH_SNAPSHOT ->
FetchSnapshotRequestDataJsonConverter.write(((FetchSnapshotRequest) request).data(), request.version());
case FIND_COORDINATOR ->
FindCoordinatorRequestDataJsonConverter.write(((FindCoordinatorRequest) request).data(), request.version());
case GET_TELEMETRY_SUBSCRIPTIONS ->
GetTelemetrySubscriptionsRequestDataJsonConverter.write(((GetTelemetrySubscriptionsRequest) request).data(), request.version());
case HEARTBEAT ->
HeartbeatRequestDataJsonConverter.write(((HeartbeatRequest) request).data(), request.version());
case INCREMENTAL_ALTER_CONFIGS ->
IncrementalAlterConfigsRequestDataJsonConverter.write(((IncrementalAlterConfigsRequest) request).data(), request.version());
case INITIALIZE_SHARE_GROUP_STATE ->
InitializeShareGroupStateRequestDataJsonConverter.write(((InitializeShareGroupStateRequest) request).data(), request.version());
case INIT_PRODUCER_ID ->
InitProducerIdRequestDataJsonConverter.write(((InitProducerIdRequest) request).data(), request.version());
case JOIN_GROUP ->
JoinGroupRequestDataJsonConverter.write(((JoinGroupRequest) request).data(), request.version());
case LEAVE_GROUP ->
LeaveGroupRequestDataJsonConverter.write(((LeaveGroupRequest) request).data(), request.version());
case LIST_CONFIG_RESOURCES ->
ListConfigResourcesRequestDataJsonConverter.write(((ListConfigResourcesRequest) request).data(), request.version());
case LIST_GROUPS ->
ListGroupsRequestDataJsonConverter.write(((ListGroupsRequest) request).data(), request.version());
case LIST_OFFSETS ->
ListOffsetsRequestDataJsonConverter.write(((ListOffsetsRequest) request).data(), request.version());
case LIST_PARTITION_REASSIGNMENTS ->
ListPartitionReassignmentsRequestDataJsonConverter.write(((ListPartitionReassignmentsRequest) request).data(), request.version());
case LIST_TRANSACTIONS ->
ListTransactionsRequestDataJsonConverter.write(((ListTransactionsRequest) request).data(), request.version());
case METADATA ->
MetadataRequestDataJsonConverter.write(((MetadataRequest) request).data(), request.version());
case OFFSET_COMMIT ->
OffsetCommitRequestDataJsonConverter.write(((OffsetCommitRequest) request).data(), request.version());
case OFFSET_DELETE ->
OffsetDeleteRequestDataJsonConverter.write(((OffsetDeleteRequest) request).data(), request.version());
case OFFSET_FETCH ->
OffsetFetchRequestDataJsonConverter.write(((OffsetFetchRequest) request).data(), request.version());
case OFFSET_FOR_LEADER_EPOCH ->
OffsetForLeaderEpochRequestDataJsonConverter.write(((OffsetsForLeaderEpochRequest) request).data(), request.version());
case PRODUCE ->
ProduceRequestDataJsonConverter.write(((ProduceRequest) request).data(), request.version(), false);
case PUSH_TELEMETRY ->
PushTelemetryRequestDataJsonConverter.write(((PushTelemetryRequest) request).data(), request.version());
case READ_SHARE_GROUP_STATE ->
ReadShareGroupStateRequestDataJsonConverter.write(((ReadShareGroupStateRequest) request).data(), request.version());
case READ_SHARE_GROUP_STATE_SUMMARY ->
ReadShareGroupStateSummaryRequestDataJsonConverter.write(((ReadShareGroupStateSummaryRequest) request).data(), request.version());
case REMOVE_RAFT_VOTER ->
RemoveRaftVoterRequestDataJsonConverter.write(((RemoveRaftVoterRequest) request).data(), request.version());
case RENEW_DELEGATION_TOKEN ->
RenewDelegationTokenRequestDataJsonConverter.write(((RenewDelegationTokenRequest) request).data(), request.version());
case SASL_AUTHENTICATE ->
SaslAuthenticateRequestDataJsonConverter.write(((SaslAuthenticateRequest) request).data(), request.version());
case SASL_HANDSHAKE ->
SaslHandshakeRequestDataJsonConverter.write(((SaslHandshakeRequest) request).data(), request.version());
case SHARE_ACKNOWLEDGE ->
ShareAcknowledgeRequestDataJsonConverter.write(((ShareAcknowledgeRequest) request).data(), request.version());
case SHARE_FETCH ->
ShareFetchRequestDataJsonConverter.write(((ShareFetchRequest) request).data(), request.version());
case SHARE_GROUP_DESCRIBE ->
ShareGroupDescribeRequestDataJsonConverter.write(((ShareGroupDescribeRequest) request).data(), request.version());
case SHARE_GROUP_HEARTBEAT ->
ShareGroupHeartbeatRequestDataJsonConverter.write(((ShareGroupHeartbeatRequest) request).data(), request.version());
case STREAMS_GROUP_DESCRIBE ->
StreamsGroupDescribeRequestDataJsonConverter.write(((StreamsGroupDescribeRequest) request).data(), request.version());
case STREAMS_GROUP_HEARTBEAT ->
StreamsGroupHeartbeatRequestDataJsonConverter.write(((StreamsGroupHeartbeatRequest) request).data(), request.version());
case SYNC_GROUP ->
SyncGroupRequestDataJsonConverter.write(((SyncGroupRequest) request).data(), request.version());
case TXN_OFFSET_COMMIT ->
TxnOffsetCommitRequestDataJsonConverter.write(((TxnOffsetCommitRequest) request).data(), request.version());
case UNREGISTER_BROKER ->
UnregisterBrokerRequestDataJsonConverter.write(((UnregisterBrokerRequest) request).data(), request.version());
case UPDATE_FEATURES ->
UpdateFeaturesRequestDataJsonConverter.write(((UpdateFeaturesRequest) request).data(), request.version());
case UPDATE_RAFT_VOTER ->
UpdateRaftVoterRequestDataJsonConverter.write(((UpdateRaftVoterRequest) request).data(), request.version());
case VOTE -> VoteRequestDataJsonConverter.write(((VoteRequest) request).data(), request.version());
case WRITE_SHARE_GROUP_STATE ->
WriteShareGroupStateRequestDataJsonConverter.write(((WriteShareGroupStateRequest) request).data(), request.version());
case WRITE_TXN_MARKERS ->
WriteTxnMarkersRequestDataJsonConverter.write(((WriteTxnMarkersRequest) request).data(), request.version());
default ->
throw new IllegalStateException("ApiKey " + request.apiKey() + " is not currently handled in `request`, the " +
"code should be updated to do so.");
};
}
    /**
     * Converts the payload of an {@link AbstractResponse} to JSON by dispatching on the
     * response's API key to the matching generated {@code *ResponseDataJsonConverter}.
     *
     * @param response the response whose data should be rendered; must be the concrete
     *                 subtype matching its own {@code apiKey()} (each arm casts to it)
     * @param version  the API version used when encoding the response; controls which
     *                 fields the generated converter emits
     * @return the JSON representation of the response payload
     * @throws IllegalStateException if the API key has no converter arm wired up here yet
     */
    public static JsonNode response(AbstractResponse response, short version) {
        return switch (response.apiKey()) {
            case ADD_OFFSETS_TO_TXN ->
                AddOffsetsToTxnResponseDataJsonConverter.write(((AddOffsetsToTxnResponse) response).data(), version);
            case ADD_PARTITIONS_TO_TXN ->
                AddPartitionsToTxnResponseDataJsonConverter.write(((AddPartitionsToTxnResponse) response).data(), version);
            case ADD_RAFT_VOTER ->
                AddRaftVoterResponseDataJsonConverter.write(((AddRaftVoterResponse) response).data(), version);
            case ALLOCATE_PRODUCER_IDS ->
                AllocateProducerIdsResponseDataJsonConverter.write(((AllocateProducerIdsResponse) response).data(), version);
            case ALTER_CLIENT_QUOTAS ->
                AlterClientQuotasResponseDataJsonConverter.write(((AlterClientQuotasResponse) response).data(), version);
            case ALTER_CONFIGS ->
                AlterConfigsResponseDataJsonConverter.write(((AlterConfigsResponse) response).data(), version);
            case ALTER_PARTITION_REASSIGNMENTS ->
                AlterPartitionReassignmentsResponseDataJsonConverter.write(((AlterPartitionReassignmentsResponse) response).data(), version);
            case ALTER_PARTITION ->
                AlterPartitionResponseDataJsonConverter.write(((AlterPartitionResponse) response).data(), version);
            case ALTER_REPLICA_LOG_DIRS ->
                AlterReplicaLogDirsResponseDataJsonConverter.write(((AlterReplicaLogDirsResponse) response).data(), version);
            case ALTER_SHARE_GROUP_OFFSETS ->
                AlterShareGroupOffsetsResponseDataJsonConverter.write(((AlterShareGroupOffsetsResponse) response).data(), version);
            case ALTER_USER_SCRAM_CREDENTIALS ->
                AlterUserScramCredentialsResponseDataJsonConverter.write(((AlterUserScramCredentialsResponse) response).data(), version);
            case API_VERSIONS ->
                ApiVersionsResponseDataJsonConverter.write(((ApiVersionsResponse) response).data(), version);
            case ASSIGN_REPLICAS_TO_DIRS ->
                AssignReplicasToDirsResponseDataJsonConverter.write(((AssignReplicasToDirsResponse) response).data(), version);
            case BEGIN_QUORUM_EPOCH ->
                BeginQuorumEpochResponseDataJsonConverter.write(((BeginQuorumEpochResponse) response).data(), version);
            case BROKER_HEARTBEAT ->
                BrokerHeartbeatResponseDataJsonConverter.write(((BrokerHeartbeatResponse) response).data(), version);
            case BROKER_REGISTRATION ->
                BrokerRegistrationResponseDataJsonConverter.write(((BrokerRegistrationResponse) response).data(), version);
            case CONSUMER_GROUP_DESCRIBE ->
                ConsumerGroupDescribeResponseDataJsonConverter.write(((ConsumerGroupDescribeResponse) response).data(), version);
            case CONSUMER_GROUP_HEARTBEAT ->
                ConsumerGroupHeartbeatResponseDataJsonConverter.write(((ConsumerGroupHeartbeatResponse) response).data(), version);
            case CONTROLLER_REGISTRATION ->
                ControllerRegistrationResponseDataJsonConverter.write(((ControllerRegistrationResponse) response).data(), version);
            case CREATE_ACLS ->
                CreateAclsResponseDataJsonConverter.write(((CreateAclsResponse) response).data(), version);
            case CREATE_DELEGATION_TOKEN ->
                CreateDelegationTokenResponseDataJsonConverter.write(((CreateDelegationTokenResponse) response).data(), version);
            case CREATE_PARTITIONS ->
                CreatePartitionsResponseDataJsonConverter.write(((CreatePartitionsResponse) response).data(), version);
            case CREATE_TOPICS ->
                CreateTopicsResponseDataJsonConverter.write(((CreateTopicsResponse) response).data(), version);
            case DELETE_ACLS ->
                DeleteAclsResponseDataJsonConverter.write(((DeleteAclsResponse) response).data(), version);
            case DELETE_GROUPS ->
                DeleteGroupsResponseDataJsonConverter.write(((DeleteGroupsResponse) response).data(), version);
            case DELETE_RECORDS ->
                DeleteRecordsResponseDataJsonConverter.write(((DeleteRecordsResponse) response).data(), version);
            case DELETE_SHARE_GROUP_OFFSETS ->
                DeleteShareGroupOffsetsResponseDataJsonConverter.write(((DeleteShareGroupOffsetsResponse) response).data(), version);
            case DELETE_SHARE_GROUP_STATE ->
                DeleteShareGroupStateResponseDataJsonConverter.write(((DeleteShareGroupStateResponse) response).data(), version);
            case DELETE_TOPICS ->
                DeleteTopicsResponseDataJsonConverter.write(((DeleteTopicsResponse) response).data(), version);
            case DESCRIBE_ACLS ->
                DescribeAclsResponseDataJsonConverter.write(((DescribeAclsResponse) response).data(), version);
            case DESCRIBE_CLIENT_QUOTAS ->
                DescribeClientQuotasResponseDataJsonConverter.write(((DescribeClientQuotasResponse) response).data(), version);
            case DESCRIBE_CLUSTER ->
                DescribeClusterResponseDataJsonConverter.write(((DescribeClusterResponse) response).data(), version);
            case DESCRIBE_CONFIGS ->
                DescribeConfigsResponseDataJsonConverter.write(((DescribeConfigsResponse) response).data(), version);
            case DESCRIBE_DELEGATION_TOKEN ->
                DescribeDelegationTokenResponseDataJsonConverter.write(((DescribeDelegationTokenResponse) response).data(), version);
            case DESCRIBE_GROUPS ->
                DescribeGroupsResponseDataJsonConverter.write(((DescribeGroupsResponse) response).data(), version);
            case DESCRIBE_LOG_DIRS ->
                DescribeLogDirsResponseDataJsonConverter.write(((DescribeLogDirsResponse) response).data(), version);
            case DESCRIBE_PRODUCERS ->
                DescribeProducersResponseDataJsonConverter.write(((DescribeProducersResponse) response).data(), version);
            case DESCRIBE_QUORUM ->
                DescribeQuorumResponseDataJsonConverter.write(((DescribeQuorumResponse) response).data(), version);
            case DESCRIBE_SHARE_GROUP_OFFSETS ->
                DescribeShareGroupOffsetsResponseDataJsonConverter.write(((DescribeShareGroupOffsetsResponse) response).data(), version);
            case DESCRIBE_TOPIC_PARTITIONS ->
                DescribeTopicPartitionsResponseDataJsonConverter.write(((DescribeTopicPartitionsResponse) response).data(), version);
            case DESCRIBE_TRANSACTIONS ->
                DescribeTransactionsResponseDataJsonConverter.write(((DescribeTransactionsResponse) response).data(), version);
            case DESCRIBE_USER_SCRAM_CREDENTIALS ->
                DescribeUserScramCredentialsResponseDataJsonConverter.write(((DescribeUserScramCredentialsResponse) response).data(), version);
            case ELECT_LEADERS ->
                ElectLeadersResponseDataJsonConverter.write(((ElectLeadersResponse) response).data(), version);
            case END_QUORUM_EPOCH ->
                EndQuorumEpochResponseDataJsonConverter.write(((EndQuorumEpochResponse) response).data(), version);
            case END_TXN -> EndTxnResponseDataJsonConverter.write(((EndTxnResponse) response).data(), version);
            case ENVELOPE -> EnvelopeResponseDataJsonConverter.write(((EnvelopeResponse) response).data(), version);
            case EXPIRE_DELEGATION_TOKEN ->
                ExpireDelegationTokenResponseDataJsonConverter.write(((ExpireDelegationTokenResponse) response).data(), version);
            // FETCH is the only arm using a three-argument overload; the trailing `false`
            // presumably suppresses the verbose/record-payload form — confirm against
            // FetchResponseDataJsonConverter.write.
            case FETCH -> FetchResponseDataJsonConverter.write(((FetchResponse) response).data(), version, false);
            case FETCH_SNAPSHOT ->
                FetchSnapshotResponseDataJsonConverter.write(((FetchSnapshotResponse) response).data(), version);
            case FIND_COORDINATOR ->
                FindCoordinatorResponseDataJsonConverter.write(((FindCoordinatorResponse) response).data(), version);
            case GET_TELEMETRY_SUBSCRIPTIONS ->
                GetTelemetrySubscriptionsResponseDataJsonConverter.write(((GetTelemetrySubscriptionsResponse) response).data(), version);
            case HEARTBEAT -> HeartbeatResponseDataJsonConverter.write(((HeartbeatResponse) response).data(), version);
            case INCREMENTAL_ALTER_CONFIGS ->
                IncrementalAlterConfigsResponseDataJsonConverter.write(((IncrementalAlterConfigsResponse) response).data(), version);
            case INITIALIZE_SHARE_GROUP_STATE ->
                InitializeShareGroupStateResponseDataJsonConverter.write(((InitializeShareGroupStateResponse) response).data(), version);
            case INIT_PRODUCER_ID ->
                InitProducerIdResponseDataJsonConverter.write(((InitProducerIdResponse) response).data(), version);
            case JOIN_GROUP -> JoinGroupResponseDataJsonConverter.write(((JoinGroupResponse) response).data(), version);
            case LEAVE_GROUP ->
                LeaveGroupResponseDataJsonConverter.write(((LeaveGroupResponse) response).data(), version);
            case LIST_CONFIG_RESOURCES ->
                ListConfigResourcesResponseDataJsonConverter.write(((ListConfigResourcesResponse) response).data(), version);
            case LIST_GROUPS ->
                ListGroupsResponseDataJsonConverter.write(((ListGroupsResponse) response).data(), version);
            case LIST_OFFSETS ->
                ListOffsetsResponseDataJsonConverter.write(((ListOffsetsResponse) response).data(), version);
            case LIST_PARTITION_REASSIGNMENTS ->
                ListPartitionReassignmentsResponseDataJsonConverter.write(((ListPartitionReassignmentsResponse) response).data(), version);
            case LIST_TRANSACTIONS ->
                ListTransactionsResponseDataJsonConverter.write(((ListTransactionsResponse) response).data(), version);
            case METADATA -> MetadataResponseDataJsonConverter.write(((MetadataResponse) response).data(), version);
            case OFFSET_COMMIT ->
                OffsetCommitResponseDataJsonConverter.write(((OffsetCommitResponse) response).data(), version);
            case OFFSET_DELETE ->
                OffsetDeleteResponseDataJsonConverter.write(((OffsetDeleteResponse) response).data(), version);
            case OFFSET_FETCH ->
                OffsetFetchResponseDataJsonConverter.write(((OffsetFetchResponse) response).data(), version);
            // Note: the concrete class name ("OffsetsForLeaderEpochResponse") differs from the
            // converter name ("OffsetForLeaderEpoch...") — this is intentional, not a typo.
            case OFFSET_FOR_LEADER_EPOCH ->
                OffsetForLeaderEpochResponseDataJsonConverter.write(((OffsetsForLeaderEpochResponse) response).data(), version);
            case PRODUCE -> ProduceResponseDataJsonConverter.write(((ProduceResponse) response).data(), version);
            case PUSH_TELEMETRY ->
                PushTelemetryResponseDataJsonConverter.write(((PushTelemetryResponse) response).data(), version);
            case READ_SHARE_GROUP_STATE ->
                ReadShareGroupStateResponseDataJsonConverter.write(((ReadShareGroupStateResponse) response).data(), version);
            case READ_SHARE_GROUP_STATE_SUMMARY ->
                ReadShareGroupStateSummaryResponseDataJsonConverter.write(((ReadShareGroupStateSummaryResponse) response).data(), version);
            case REMOVE_RAFT_VOTER ->
                RemoveRaftVoterResponseDataJsonConverter.write(((RemoveRaftVoterResponse) response).data(), version);
            case RENEW_DELEGATION_TOKEN ->
                RenewDelegationTokenResponseDataJsonConverter.write(((RenewDelegationTokenResponse) response).data(), version);
            case SASL_AUTHENTICATE ->
                SaslAuthenticateResponseDataJsonConverter.write(((SaslAuthenticateResponse) response).data(), version);
            case SASL_HANDSHAKE ->
                SaslHandshakeResponseDataJsonConverter.write(((SaslHandshakeResponse) response).data(), version);
            case SHARE_ACKNOWLEDGE ->
                ShareAcknowledgeResponseDataJsonConverter.write(((ShareAcknowledgeResponse) response).data(), version);
            case SHARE_FETCH ->
                ShareFetchResponseDataJsonConverter.write(((ShareFetchResponse) response).data(), version);
            case SHARE_GROUP_DESCRIBE ->
                ShareGroupDescribeResponseDataJsonConverter.write(((ShareGroupDescribeResponse) response).data(), version);
            case SHARE_GROUP_HEARTBEAT ->
                ShareGroupHeartbeatResponseDataJsonConverter.write(((ShareGroupHeartbeatResponse) response).data(), version);
            case STREAMS_GROUP_DESCRIBE ->
                StreamsGroupDescribeResponseDataJsonConverter.write(((StreamsGroupDescribeResponse) response).data(), version);
            case STREAMS_GROUP_HEARTBEAT ->
                StreamsGroupHeartbeatResponseDataJsonConverter.write(((StreamsGroupHeartbeatResponse) response).data(), version);
            case SYNC_GROUP -> SyncGroupResponseDataJsonConverter.write(((SyncGroupResponse) response).data(), version);
            case TXN_OFFSET_COMMIT ->
                TxnOffsetCommitResponseDataJsonConverter.write(((TxnOffsetCommitResponse) response).data(), version);
            case UNREGISTER_BROKER ->
                UnregisterBrokerResponseDataJsonConverter.write(((UnregisterBrokerResponse) response).data(), version);
            case UPDATE_FEATURES ->
                UpdateFeaturesResponseDataJsonConverter.write(((UpdateFeaturesResponse) response).data(), version);
            case UPDATE_RAFT_VOTER ->
                UpdateRaftVoterResponseDataJsonConverter.write(((UpdateRaftVoterResponse) response).data(), version);
            case VOTE -> VoteResponseDataJsonConverter.write(((VoteResponse) response).data(), version);
            case WRITE_SHARE_GROUP_STATE ->
                WriteShareGroupStateResponseDataJsonConverter.write(((WriteShareGroupStateResponse) response).data(), version);
            case WRITE_TXN_MARKERS ->
                WriteTxnMarkersResponseDataJsonConverter.write(((WriteTxnMarkersResponse) response).data(), version);
            // Fail fast when a new ApiKey is added without a converter arm above.
            default ->
                throw new IllegalStateException("ApiKey " + response.apiKey() + " is not currently handled in `response`, the " +
                    "code should be updated to do so.");
        };
    }
public static JsonNode requestHeaderNode(RequestHeader header) {
ObjectNode node = (ObjectNode) RequestHeaderDataJsonConverter.write(
header.data(), header.headerVersion(), false
);
node.set("requestApiKeyName", new TextNode(header.apiKey().toString()));
if (header.isApiVersionDeprecated()) {
node.set("requestApiVersionDeprecated", BooleanNode.TRUE);
}
return node;
}
public static JsonNode requestDesc(RequestHeader header, Optional<JsonNode> requestNode, boolean isForwarded) {
ObjectNode node = JsonNodeFactory.instance.objectNode();
node.set("isForwarded", isForwarded ? BooleanNode.TRUE : BooleanNode.FALSE);
node.set("requestHeader", requestHeaderNode(header));
node.set("request", requestNode.orElse(new TextNode("")));
return node;
}
public static JsonNode clientInfoNode(ClientInformation clientInfo) {
ObjectNode node = JsonNodeFactory.instance.objectNode();
node.set("softwareName", new TextNode(clientInfo.softwareName()));
node.set("softwareVersion", new TextNode(clientInfo.softwareVersion()));
return node;
}
public static JsonNode requestDescMetrics(RequestHeader header, Optional<JsonNode> requestNode, Optional<JsonNode> responseNode,
RequestContext context, Session session, boolean isForwarded,
double totalTimeMs, double requestQueueTimeMs, double apiLocalTimeMs,
double apiRemoteTimeMs, long apiThrottleTimeMs, double responseQueueTimeMs,
double responseSendTimeMs, long temporaryMemoryBytes,
double messageConversionsTimeMs) {
ObjectNode node = (ObjectNode) requestDesc(header, requestNode, isForwarded);
node.set("response", responseNode.orElse(new TextNode("")));
node.set("connection", new TextNode(context.connectionId));
node.set("totalTimeMs", new DoubleNode(totalTimeMs));
node.set("requestQueueTimeMs", new DoubleNode(requestQueueTimeMs));
node.set("localTimeMs", new DoubleNode(apiLocalTimeMs));
node.set("remoteTimeMs", new DoubleNode(apiRemoteTimeMs));
node.set("throttleTimeMs", new LongNode(apiThrottleTimeMs));
node.set("responseQueueTimeMs", new DoubleNode(responseQueueTimeMs));
node.set("sendTimeMs", new DoubleNode(responseSendTimeMs));
node.set("securityProtocol", new TextNode(context.securityProtocol.toString()));
node.set("principal", new TextNode(session.principal.toString()));
node.set("listener", new TextNode(context.listenerName.value()));
node.set("clientInformation", clientInfoNode(context.clientInformation));
if (temporaryMemoryBytes > 0) {
node.set("temporaryMemoryBytes", new LongNode(temporaryMemoryBytes));
}
if (messageConversionsTimeMs > 0) {
node.set("messageConversionsTime", new DoubleNode(messageConversionsTimeMs));
}
return node;
}
}
| RequestConvertToJson |
java | spring-projects__spring-boot | module/spring-boot-ldap/src/main/java/org/springframework/boot/ldap/autoconfigure/LdapAutoConfiguration.java | {
"start": 2245,
"end": 4907
} | class ____ {
@Bean
@ConditionalOnMissingBean(LdapConnectionDetails.class)
PropertiesLdapConnectionDetails propertiesLdapConnectionDetails(LdapProperties properties,
Environment environment) {
return new PropertiesLdapConnectionDetails(properties, environment);
}
@Bean
@ConditionalOnMissingBean
LdapContextSource ldapContextSource(LdapConnectionDetails connectionDetails, LdapProperties properties,
ObjectProvider<DirContextAuthenticationStrategy> dirContextAuthenticationStrategy) {
LdapContextSource source = new LdapContextSource();
dirContextAuthenticationStrategy.ifUnique(source::setAuthenticationStrategy);
PropertyMapper propertyMapper = PropertyMapper.get();
propertyMapper.from(connectionDetails.getUsername()).to(source::setUserDn);
propertyMapper.from(connectionDetails.getPassword()).to(source::setPassword);
propertyMapper.from(properties.getAnonymousReadOnly()).to(source::setAnonymousReadOnly);
propertyMapper.from(properties.getReferral())
.as(((referral) -> referral.name().toLowerCase(Locale.ROOT)))
.to(source::setReferral);
propertyMapper.from(connectionDetails.getBase()).to(source::setBase);
propertyMapper.from(connectionDetails.getUrls()).to(source::setUrls);
propertyMapper.from(properties.getBaseEnvironment())
.to((baseEnvironment) -> source.setBaseEnvironmentProperties(Collections.unmodifiableMap(baseEnvironment)));
return source;
}
@Bean
@ConditionalOnMissingBean
ObjectDirectoryMapper objectDirectoryMapper() {
ApplicationConversionService conversionService = new ApplicationConversionService();
ConverterUtils.addDefaultConverters(conversionService);
DefaultObjectDirectoryMapper objectDirectoryMapper = new DefaultObjectDirectoryMapper();
objectDirectoryMapper.setConversionService(conversionService);
return objectDirectoryMapper;
}
@Bean
@ConditionalOnMissingBean(LdapOperations.class)
LdapTemplate ldapTemplate(LdapProperties properties, ContextSource contextSource,
ObjectDirectoryMapper objectDirectoryMapper) {
Template template = properties.getTemplate();
PropertyMapper propertyMapper = PropertyMapper.get();
LdapTemplate ldapTemplate = new LdapTemplate(contextSource);
ldapTemplate.setObjectDirectoryMapper(objectDirectoryMapper);
propertyMapper.from(template.isIgnorePartialResultException())
.to(ldapTemplate::setIgnorePartialResultException);
propertyMapper.from(template.isIgnoreNameNotFoundException()).to(ldapTemplate::setIgnoreNameNotFoundException);
propertyMapper.from(template.isIgnoreSizeLimitExceededException())
.to(ldapTemplate::setIgnoreSizeLimitExceededException);
return ldapTemplate;
}
}
| LdapAutoConfiguration |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/config/AbstractConfigTest.java | {
"start": 17810,
"end": 18058
} | class ____ with names or classes where all classes are visible to Kafka classloader, context classloader is null
Thread.currentThread().setContextClassLoader(null);
ClassTestConfig.testOverrides();
// Test | overrides |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/SampleConnectorClientConfigOverridePolicy.java | {
"start": 952,
"end": 1212
} | class ____ extends NoneConnectorClientConfigOverridePolicy {
private boolean closed;
@Override
public void close() {
closed = true;
}
public boolean isClosed() {
return closed;
}
}
| SampleConnectorClientConfigOverridePolicy |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/web/client/support/RestClientProxyRegistryIntegrationTests.java | {
"start": 5548,
"end": 5692
} | class ____ extends ClientConfig {
}
@Configuration(proxyBeanMethods = false)
@Import(ManualListingRegistrar.class)
private static | DetectConfig |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/InjectMocks.java | {
"start": 5677,
"end": 6599
} | class ____ {
* private ArticleDatabase database;
* private ArticleCalculator calculator;
*
* ArticleManager(ArticleObserver observer, boolean flag) {
* // observer is not declared in the test above.
* // flag is not mockable anyway
* }
* }
* </code></pre>
* </p>
*
*
* <p>
* Again, note that @InjectMocks will only inject mocks/spies created using the @Spy or @Mock annotation.
* </p>
*
* <p>
* <strong><code>MockitoAnnotations.openMocks(this)</code></strong> method has to be called to initialize annotated objects.
* In above example, <code>openMocks()</code> is called in @Before (JUnit4) method of test's base class.
* <strong>Instead</strong> you can also put openMocks() in your JUnit runner (@RunWith) or use the built-in
* {@link MockitoJUnitRunner}. Also, make sure to release any mocks after disposing your test | ArticleManager |
java | google__auto | value/src/main/java/com/google/auto/value/extension/AutoValueExtension.java | {
"start": 22795,
"end": 23806
} | class ____ of the class
* {@code className} in the package {@link Context#packageName() context.packageName()}.
*
* <p>The returned string will typically look like this:
*
* <pre>{@code
* package <package>;
* ...
* <finalOrAbstract> class <className> extends <classToExtend> {
* // Constructor
* <className>(<constructorParameters>) {
* super(<constructorParameterNames>);
* ...
* }
* ...
* }
* }</pre>
*
* <p>Here, {@code <package>} is {@link Context#packageName()}; {@code <finalOrAbstract>} is the
* keyword {@code final} if {@code isFinal} is true or {@code abstract} otherwise; and {@code
* <className>} and {@code <classToExtend>} are the values of this method's parameters of the same
* name. The {@code <constructorParameters>} and {@code <constructorParameterNames>} are typically
* derived from {@link Context#propertyTypes()}.
*
* <p>An extension can also generate a subclass of the nested {@code Builder} | definition |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.