language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/context/properties/source/MapConfigurationPropertySourceTests.java
|
{
"start": 1074,
"end": 3962
}
|
class ____ {
@Test
@SuppressWarnings("NullAway") // Test null check
void createWhenMapIsNullShouldThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> new MapConfigurationPropertySource(null))
.withMessageContaining("'map' must not be null");
}
@Test
void createWhenMapHasEntriesShouldAdaptMap() {
Map<Object, Object> map = new LinkedHashMap<>();
map.put("foo.BAR", "spring");
map.put(ConfigurationPropertyName.of("foo.baz"), "boot");
MapConfigurationPropertySource source = new MapConfigurationPropertySource(map);
assertThat(getValue(source, "foo.bar")).isEqualTo("spring");
assertThat(getValue(source, "foo.baz")).isEqualTo("boot");
}
@Test
@SuppressWarnings("NullAway") // Test null check
void putAllWhenMapIsNullShouldThrowException() {
MapConfigurationPropertySource source = new MapConfigurationPropertySource();
assertThatIllegalArgumentException().isThrownBy(() -> source.putAll(null))
.withMessageContaining("'map' must not be null");
}
@Test
void putAllShouldPutEntries() {
Map<Object, Object> map = new LinkedHashMap<>();
map.put("foo.BAR", "spring");
map.put("foo.baz", "boot");
MapConfigurationPropertySource source = new MapConfigurationPropertySource();
source.putAll(map);
assertThat(getValue(source, "foo.bar")).isEqualTo("spring");
assertThat(getValue(source, "foo.baz")).isEqualTo("boot");
}
@Test
void putShouldPutEntry() {
MapConfigurationPropertySource source = new MapConfigurationPropertySource();
source.put("foo.bar", "baz");
assertThat(getValue(source, "foo.bar")).isEqualTo("baz");
}
@Test
void getConfigurationPropertyShouldGetFromMemory() {
MapConfigurationPropertySource source = new MapConfigurationPropertySource();
source.put("foo.bar", "baz");
assertThat(getValue(source, "foo.bar")).isEqualTo("baz");
source.put("foo.bar", "big");
assertThat(getValue(source, "foo.bar")).isEqualTo("big");
}
@Test
void iteratorShouldGetFromMemory() {
MapConfigurationPropertySource source = new MapConfigurationPropertySource();
source.put("foo.BAR", "spring");
source.put("foo.baz", "boot");
assertThat(source.iterator()).toIterable()
.containsExactly(ConfigurationPropertyName.of("foo.bar"), ConfigurationPropertyName.of("foo.baz"));
}
@Test
void streamShouldGetFromMemory() {
MapConfigurationPropertySource source = new MapConfigurationPropertySource();
source.put("foo.BAR", "spring");
source.put("foo.baz", "boot");
assertThat(source.stream()).containsExactly(ConfigurationPropertyName.of("foo.bar"),
ConfigurationPropertyName.of("foo.baz"));
}
private @Nullable Object getValue(ConfigurationPropertySource source, String name) {
ConfigurationProperty property = source.getConfigurationProperty(ConfigurationPropertyName.of(name));
return (property != null) ? property.getValue() : null;
}
}
|
MapConfigurationPropertySourceTests
|
java
|
hibernate__hibernate-orm
|
hibernate-spatial/src/main/java/org/hibernate/spatial/criteria/internal/GeolatteSpatialCriteriaBuilderImpl.java
|
{
"start": 356,
"end": 612
}
|
class ____ extends SpatialCriteriaBuilderImpl<Geometry<?>>
implements GeolatteSpatialCriteriaBuilder {
public GeolatteSpatialCriteriaBuilderImpl(HibernateCriteriaBuilder criteriaBuilder) {
super( criteriaBuilder );
}
}
|
GeolatteSpatialCriteriaBuilderImpl
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/main/java/org/springframework/boot/ApplicationProperties.java
|
{
"start": 4624,
"end": 4914
}
|
class ____ implements RuntimeHintsRegistrar {
@Override
public void registerHints(RuntimeHints hints, @Nullable ClassLoader classLoader) {
BindableRuntimeHintsRegistrar.forTypes(ApplicationProperties.class).registerHints(hints, classLoader);
}
}
}
|
ApplicationPropertiesRuntimeHints
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/erroneous/propertymapping/ErroneousMapper4.java
|
{
"start": 315,
"end": 476
}
|
interface ____ {
@BeanMapping( ignoreByDefault = true )
@Mapping( target = "property", source = "source" )
Target map(Source source);
}
|
ErroneousMapper4
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/CsiAdaptorProtocolPBClientImpl.java
|
{
"start": 2813,
"end": 5360
}
|
class ____
implements CsiAdaptorProtocol, Closeable {
private final CsiAdaptorPB proxy;
public CsiAdaptorProtocolPBClientImpl(long clientVersion,
InetSocketAddress addr, Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, CsiAdaptorPB.class, ProtobufRpcEngine2.class);
this.proxy = RPC.getProxy(CsiAdaptorPB.class, clientVersion, addr, conf);
}
@Override
public GetPluginInfoResponse getPluginInfo(
GetPluginInfoRequest request) throws YarnException, IOException {
CsiAdaptorProtos.GetPluginInfoRequest requestProto =
((GetPluginInfoRequestPBImpl) request).getProto();
try {
return new GetPluginInfoResponsePBImpl(
proxy.getPluginInfo(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public ValidateVolumeCapabilitiesResponse validateVolumeCapacity(
ValidateVolumeCapabilitiesRequest request)
throws YarnException, IOException {
CsiAdaptorProtos.ValidateVolumeCapabilitiesRequest requestProto =
((ValidateVolumeCapabilitiesRequestPBImpl) request).getProto();
try {
return new ValidateVolumeCapabilitiesResponsePBImpl(
proxy.validateVolumeCapacity(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public NodePublishVolumeResponse nodePublishVolume(
NodePublishVolumeRequest request) throws IOException, YarnException {
CsiAdaptorProtos.NodePublishVolumeRequest requestProto =
((NodePublishVolumeRequestPBImpl) request).getProto();
try {
return new NodePublishVolumeResponsePBImpl(
proxy.nodePublishVolume(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public NodeUnpublishVolumeResponse nodeUnpublishVolume(
NodeUnpublishVolumeRequest request) throws YarnException, IOException {
CsiAdaptorProtos.NodeUnpublishVolumeRequest requestProto =
((NodeUnpublishVolumeRequestPBImpl) request).getProto();
try {
return new NodeUnpublishVolumeResponsePBImpl(
proxy.nodeUnpublishVolume(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public void close() throws IOException {
if(this.proxy != null) {
RPC.stopProxy(this.proxy);
}
}
}
|
CsiAdaptorProtocolPBClientImpl
|
java
|
spring-projects__spring-security
|
saml2/saml2-service-provider/src/test/java/org/springframework/security/saml2/provider/service/web/authentication/logout/Saml2RelyingPartyInitiatedLogoutSuccessHandlerTests.java
|
{
"start": 2199,
"end": 5547
}
|
class ____ {
Saml2LogoutRequestResolver logoutRequestResolver = mock(Saml2LogoutRequestResolver.class);
Saml2LogoutRequestRepository logoutRequestRepository = mock(Saml2LogoutRequestRepository.class);
Saml2RelyingPartyInitiatedLogoutSuccessHandler logoutRequestSuccessHandler = new Saml2RelyingPartyInitiatedLogoutSuccessHandler(
this.logoutRequestResolver);
@BeforeEach
public void setUp() {
this.logoutRequestSuccessHandler.setLogoutRequestRepository(this.logoutRequestRepository);
}
@AfterEach
public void tearDown() {
SecurityContextHolder.clearContext();
}
@Test
public void onLogoutSuccessWhenRedirectThenRedirectsToAssertingParty() throws Exception {
RelyingPartyRegistration registration = TestRelyingPartyRegistrations.full().build();
Authentication authentication = authentication(registration);
SecurityContextHolder.getContext().setAuthentication(authentication);
Saml2LogoutRequest logoutRequest = Saml2LogoutRequest.withRelyingPartyRegistration(registration)
.samlRequest("request")
.build();
MockHttpServletRequest request = post("/saml2/logout").build();
MockHttpServletResponse response = new MockHttpServletResponse();
given(this.logoutRequestResolver.resolve(any(), any())).willReturn(logoutRequest);
this.logoutRequestSuccessHandler.onLogoutSuccess(request, response, authentication);
String content = response.getHeader("Location");
assertThat(content).contains(Saml2ParameterNames.SAML_REQUEST);
assertThat(content).startsWith(registration.getAssertingPartyMetadata().getSingleLogoutServiceLocation());
}
@Test
public void onLogoutSuccessWhenPostThenPostsToAssertingParty() throws Exception {
RelyingPartyRegistration registration = TestRelyingPartyRegistrations.full()
.assertingPartyMetadata((party) -> party.singleLogoutServiceBinding(Saml2MessageBinding.POST))
.build();
Authentication authentication = authentication(registration);
SecurityContextHolder.getContext().setAuthentication(authentication);
Saml2LogoutRequest logoutRequest = Saml2LogoutRequest.withRelyingPartyRegistration(registration)
.samlRequest("request")
.build();
MockHttpServletRequest request = post("/saml2/logout").build();
MockHttpServletResponse response = new MockHttpServletResponse();
given(this.logoutRequestResolver.resolve(any(), any())).willReturn(logoutRequest);
this.logoutRequestSuccessHandler.onLogoutSuccess(request, response, authentication);
String content = response.getContentAsString();
assertThat(content).contains(Saml2ParameterNames.SAML_REQUEST);
assertThat(content).contains(registration.getAssertingPartyMetadata().getSingleLogoutServiceLocation());
assertThat(content).contains(
"<meta http-equiv=\"Content-Security-Policy\" content=\"script-src 'sha256-oZhLbc2kO8b8oaYLrUc7uye1MgVKMyLtPqWR4WtKF+c='\">");
assertThat(content).contains("<script>window.onload = function() { document.forms[0].submit(); }</script>");
}
private Saml2Authentication authentication(RelyingPartyRegistration registration) {
DefaultSaml2AuthenticatedPrincipal principal = new DefaultSaml2AuthenticatedPrincipal("user", new HashMap<>());
principal.setRelyingPartyRegistrationId(registration.getRegistrationId());
return new Saml2Authentication(principal, "response", new ArrayList<>());
}
}
|
Saml2RelyingPartyInitiatedLogoutSuccessHandlerTests
|
java
|
apache__kafka
|
connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java
|
{
"start": 37210,
"end": 42248
}
|
class
____ protocolConfigKey = producerOverrideKey(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG);
config.put(protocolConfigKey, "SASL_PLAINTEXT");
String maxRequestSizeConfigKey = producerOverrideKey(ProducerConfig.MAX_REQUEST_SIZE_CONFIG);
config.put(maxRequestSizeConfigKey, "420");
String maxBlockConfigKey = producerOverrideKey(ProducerConfig.MAX_BLOCK_MS_CONFIG);
config.put(maxBlockConfigKey, "28980");
String idempotenceConfigKey = producerOverrideKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG);
config.put(idempotenceConfigKey, "true");
String bootstrapServersConfigKey = producerOverrideKey(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG);
config.put(bootstrapServersConfigKey, "SASL_PLAINTEXT://localhost:12345,SASL_PLAINTEXT://localhost:23456");
String loginCallbackHandlerConfigKey = producerOverrideKey(SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS);
config.put(loginCallbackHandlerConfigKey, OAuthBearerUnsecuredLoginCallbackHandler.class.getName());
final Set<String> overriddenClientConfigs = new HashSet<>();
overriddenClientConfigs.add(protocolConfigKey);
overriddenClientConfigs.add(maxRequestSizeConfigKey);
overriddenClientConfigs.add(maxBlockConfigKey);
overriddenClientConfigs.add(idempotenceConfigKey);
overriddenClientConfigs.add(bootstrapServersConfigKey);
overriddenClientConfigs.add(loginCallbackHandlerConfigKey);
ConfigInfos result = herder.validateConnectorConfig(config, s -> null, false);
assertEquals(ConnectorType.SOURCE, herder.connectorType(config));
Map<String, String> validatedOverriddenClientConfigs = new HashMap<>();
for (ConfigInfo configInfo : result.configs()) {
String configName = configInfo.configKey().name();
if (overriddenClientConfigs.contains(configName)) {
validatedOverriddenClientConfigs.put(configName, configInfo.configValue().value());
}
}
Map<String, String> rawOverriddenClientConfigs = config.entrySet().stream()
.filter(e -> overriddenClientConfigs.contains(e.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
assertEquals(rawOverriddenClientConfigs, validatedOverriddenClientConfigs);
verifyValidationIsolation();
}
@Test
public void testConfigValidationAllowlistOverride() {
final Class<? extends Connector> connectorClass = SampleSourceConnector.class;
AllowlistConnectorClientConfigOverridePolicy policy = new AllowlistConnectorClientConfigOverridePolicy();
policy.configure(Map.of(AllowlistConnectorClientConfigOverridePolicy.ALLOWLIST_CONFIG, "acks"));
AbstractHerder herder = createConfigValidationHerder(connectorClass, policy);
Map<String, String> config = new HashMap<>();
config.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connectorClass.getName());
config.put(ConnectorConfig.NAME_CONFIG, "connector-name");
config.put("required", "value"); // connector required config
String ackConfigKey = producerOverrideKey(ProducerConfig.ACKS_CONFIG);
String saslConfigKey = producerOverrideKey(SaslConfigs.SASL_JAAS_CONFIG);
config.put(ackConfigKey, "none");
config.put(saslConfigKey, "jaas_config");
ConfigInfos result = herder.validateConnectorConfig(config, s -> null, false);
assertEquals(ConnectorType.SOURCE, herder.connectorType(config));
// We expect there to be errors due to sasl.jaas.config not being allowed. Note that these assertions depend heavily on
// the config fields for SourceConnectorConfig, but we expect these to change rarely.
assertEquals(SampleSourceConnector.class.getName(), result.name());
// Each transform also gets its own group
List<String> expectedGroups = List.of(
ConnectorConfig.COMMON_GROUP,
ConnectorConfig.TRANSFORMS_GROUP,
ConnectorConfig.PREDICATES_GROUP,
ConnectorConfig.ERROR_GROUP,
SourceConnectorConfig.TOPIC_CREATION_GROUP,
SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_GROUP,
SourceConnectorConfig.OFFSETS_TOPIC_GROUP
);
assertEquals(expectedGroups, result.groups());
assertEquals(1, result.errorCount());
// Base connector config has 19 fields, connector's configs add 7, and 2 producer overrides
assertEquals(28, result.configs().size());
assertTrue(result.configs().stream().anyMatch(
configInfo -> ackConfigKey.equals(configInfo.configValue().name()) && configInfo.configValue().errors().isEmpty()));
assertTrue(result.configs().stream().anyMatch(
configInfo -> saslConfigKey.equals(configInfo.configValue().name()) && !configInfo.configValue().errors().isEmpty()));
verifyValidationIsolation();
}
static final
|
String
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/type/TypeFactoryTest.java
|
{
"start": 1964,
"end": 2183
}
|
class ____ {
// self-reference; should be resolved as "Comparable<Object>"
public <T extends Comparable<T>> T getFoobar() { return null; }
}
@SuppressWarnings("serial")
public static
|
SneakyBean2
|
java
|
apache__maven
|
compat/maven-builder-support/src/main/java/org/apache/maven/building/ProblemCollectorFactory.java
|
{
"start": 1009,
"end": 1399
}
|
class ____ {
/**
* The default implementation is not visible, create it with this factory
*
* @param problems starting set of problems, may be {@code null}
* @return a new instance of a ProblemCollector
*/
public static ProblemCollector newInstance(List<Problem> problems) {
return new DefaultProblemCollector(problems);
}
}
|
ProblemCollectorFactory
|
java
|
elastic__elasticsearch
|
modules/lang-painless/src/main/java/org/elasticsearch/painless/ir/BreakNode.java
|
{
"start": 616,
"end": 1097
}
|
class ____ extends StatementNode {
/* ---- begin visitor ---- */
@Override
public <Scope> void visit(IRTreeVisitor<Scope> irTreeVisitor, Scope scope) {
irTreeVisitor.visitBreak(this, scope);
}
@Override
public <Scope> void visitChildren(IRTreeVisitor<Scope> irTreeVisitor, Scope scope) {
// do nothing; terminal node
}
/* ---- end visitor ---- */
public BreakNode(Location location) {
super(location);
}
}
|
BreakNode
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AStoreImpl.java
|
{
"start": 36437,
"end": 36897
}
|
class ____ implements StreamFactoryCallbacks {
@Override
public S3Client getOrCreateSyncClient() throws IOException {
LOG.debug("Stream factory requested sync client");
return clientManager().getOrCreateS3Client();
}
@Override
public void incrementFactoryStatistic(Statistic statistic) {
incrementStatistic(statistic);
}
}
/*
=============== END ObjectInputStreamFactory ===============
*/
}
|
FactoryCallbacks
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/builder/EqualsBuilder.java
|
{
"start": 33578,
"end": 34861
}
|
class ____ being excluded, call normal object equals method on lhsClass.
if (bypassReflectionClasses != null
&& (bypassReflectionClasses.contains(lhsClass) || bypassReflectionClasses.contains(rhsClass))) {
isEquals = lhs.equals(rhs);
} else {
reflectionAppend(lhs, rhs, testClass);
while (testClass.getSuperclass() != null && testClass != reflectUpToClass) {
testClass = testClass.getSuperclass();
reflectionAppend(lhs, rhs, testClass);
}
}
} catch (final IllegalArgumentException e) {
// In this case, we tried to test a subclass vs. a superclass and
// the subclass has ivars or the ivars are transient and
// we are testing transients.
// If a subclass has ivars that we are trying to test them, we get an
// exception and we know that the objects are not equal.
isEquals = false;
}
return this;
}
/**
* Appends the fields and values defined by the given object of the
* given Class.
*
* @param lhs the left-hand side object
* @param rhs the right-hand side object
* @param clazz the
|
is
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
|
{
"start": 21311,
"end": 22476
}
|
class ____ {
static final String KEY_FIELD_DELIMITER = "*";
String statName;
String dateTime;
String taskType;
IntervalKey(String stat, long timeMSec, String taskType) {
statName = stat;
SimpleDateFormat dateF = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
dateTime = dateF.format(new Date(timeMSec));
this.taskType = taskType;
}
IntervalKey(String key) {
StringTokenizer keyTokens = new StringTokenizer(key, KEY_FIELD_DELIMITER);
if(!keyTokens.hasMoreTokens()) return;
statName = keyTokens.nextToken();
if(!keyTokens.hasMoreTokens()) return;
dateTime = keyTokens.nextToken();
if(!keyTokens.hasMoreTokens()) return;
taskType = keyTokens.nextToken();
}
void setStatName(String stat) {
statName = stat;
}
String getStringKey() {
return statName + KEY_FIELD_DELIMITER +
dateTime + KEY_FIELD_DELIMITER +
taskType;
}
Text getTextKey() {
return new Text(getStringKey());
}
public String toString() {
return getStringKey();
}
}
/**
* Mapper class.
*/
private static
|
IntervalKey
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/CustomHttpOptionsViaProgrammaticallyClientCreatedTest.java
|
{
"start": 627,
"end": 2477
}
|
class ____ {
private static final String EXPECTED_VALUE = "success";
@TestHTTPResource
URI baseUri;
@RegisterExtension
static final QuarkusUnitTest app = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar.addClasses(Client.class));
@Test
void shouldUseCustomHttpOptionsUsingProvider() {
// First verify the standard configuration
assertThat(QuarkusRestClientBuilder.newBuilder().baseUri(baseUri).build(Client.class).get())
.isEqualTo(EXPECTED_VALUE);
// Now, it should fail if we use a custom http client options with a very limited max header size:
Client client = QuarkusRestClientBuilder.newBuilder().baseUri(baseUri)
.register(CustomHttpClientOptionsWithLimit.class)
.build(Client.class);
assertThatThrownBy(() -> client.get()).hasMessageContaining("HTTP header is larger than 1 bytes.");
}
@Test
void shouldUseCustomHttpOptionsUsingAPI() {
// First verify the standard configuration
assertThat(QuarkusRestClientBuilder.newBuilder().baseUri(baseUri).build(Client.class).get())
.isEqualTo(EXPECTED_VALUE);
// Now, it should fail if we use a custom http client options with a very limited max header size:
HttpClientOptions options = new HttpClientOptions();
options.setMaxHeaderSize(1); // this is just to verify that this HttpClientOptions is indeed used.
Client client = QuarkusRestClientBuilder.newBuilder().baseUri(baseUri)
.httpClientOptions(options)
.build(Client.class);
assertThatThrownBy(() -> client.get()).hasMessageContaining("HTTP header is larger than 1 bytes.");
}
@Path("/")
@ApplicationScoped
public static
|
CustomHttpOptionsViaProgrammaticallyClientCreatedTest
|
java
|
elastic__elasticsearch
|
modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/AbstractGeoIpIT.java
|
{
"start": 1028,
"end": 2061
}
|
class ____ extends ESIntegTestCase {
private static final boolean useFixture = Booleans.parseBoolean(System.getProperty("geoip_use_service", "false")) == false;
@ClassRule
public static final GeoIpHttpFixture fixture = new GeoIpHttpFixture(useFixture);
protected String getEndpoint() {
return useFixture ? fixture.getAddress() : null;
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(IngestGeoIpPlugin.class, IngestGeoIpSettingsPlugin.class);
}
@Override
protected Settings nodeSettings(final int nodeOrdinal, final Settings otherSettings) {
final Path databasePath = createTempDir();
copyDefaultDatabases(databasePath);
return Settings.builder()
.put("ingest.geoip.database_path", databasePath)
.put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), false)
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.build();
}
public static
|
AbstractGeoIpIT
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client-jaxrs/deployment/src/main/java/io/quarkus/jaxrs/client/reactive/deployment/JaxrsClientReactiveProcessor.java
|
{
"start": 219594,
"end": 221124
}
|
class ____ {
final MethodParameter methodParameter;
final String typeName;
final Type type;
final FieldDescriptor field;
final Supplier<FieldDescriptor> paramAnnotationsField;
final Supplier<FieldDescriptor> genericsParametersField;
final int paramIndex;
private SubResourceParameter(MethodParameter methodParameter, String typeName, Type type, FieldDescriptor field,
Supplier<FieldDescriptor> paramAnnotationsField, Supplier<FieldDescriptor> genericsParametersField,
int paramIndex) {
this.methodParameter = methodParameter;
this.typeName = typeName;
this.type = type;
this.field = field;
this.paramAnnotationsField = paramAnnotationsField;
this.genericsParametersField = genericsParametersField;
this.paramIndex = paramIndex;
}
}
private record GeneratedSubResourceKey(Type returnType,
List<SubResourceMethodParameterKeyPart> ownerSubResourceMethodParameters,
List<SubResourceMethodParameterKeyPart> subResourceMethodParameters) {
}
private record SubResourceMethodParameterKeyPart(MethodParameter methodParameter, int paramIndex) {
static SubResourceMethodParameterKeyPart of(SubResourceParameter subResourceParameter) {
return new SubResourceMethodParameterKeyPart(subResourceParameter.methodParameter, subResourceParameter.paramIndex);
}
}
}
|
SubResourceParameter
|
java
|
apache__camel
|
components/camel-sjms/src/test/java/org/apache/camel/component/sjms/SjmsEndpointTest.java
|
{
"start": 1541,
"end": 6876
}
|
class ____ extends CamelTestSupport {
@Override
protected boolean useJmx() {
return true;
}
@Test
public void testDefaults() {
Endpoint endpoint = context.getEndpoint("sjms:test.SjmsEndpointTest");
assertNotNull(endpoint);
assertTrue(endpoint instanceof SjmsEndpoint);
SjmsEndpoint sjms = (SjmsEndpoint) endpoint;
assertEquals("sjms://test.SjmsEndpointTest", sjms.getEndpointUri());
assertEquals(ExchangePattern.InOnly, sjms.createExchange().getPattern());
}
@Test
public void testQueueEndpoint() {
Endpoint sjms = context.getEndpoint("sjms:queue:test.SjmsEndpointTest");
assertNotNull(sjms);
assertEquals("sjms://queue:test.SjmsEndpointTest", sjms.getEndpointUri());
assertTrue(sjms instanceof SjmsEndpoint);
}
@Test
public void testJndiStyleEndpointName() {
SjmsEndpoint sjms = context.getEndpoint("sjms:/jms/test/hov.t1.dev:topic.SjmsEndpointTest", SjmsEndpoint.class);
assertNotNull(sjms);
assertFalse(sjms.isTopic());
assertEquals("/jms/test/hov.t1.dev:topic.SjmsEndpointTest", sjms.getDestinationName());
}
@Test
public void testSetTransacted() {
Endpoint endpoint = context.getEndpoint("sjms:queue:test.SjmsEndpointTest?transacted=true");
assertNotNull(endpoint);
assertTrue(endpoint instanceof SjmsEndpoint);
SjmsEndpoint qe = (SjmsEndpoint) endpoint;
assertTrue(qe.isTransacted());
}
@Test
public void testAsyncProducer() {
Endpoint endpoint = context.getEndpoint("sjms:queue:test.SjmsEndpointTest?synchronous=true");
assertNotNull(endpoint);
assertTrue(endpoint instanceof SjmsEndpoint);
SjmsEndpoint qe = (SjmsEndpoint) endpoint;
assertTrue(qe.isSynchronous());
}
@Test
public void testReplyTo() {
String replyTo = "reply.to.queue";
Endpoint endpoint = context.getEndpoint("sjms:queue:test.SjmsEndpointTest?replyTo=" + replyTo);
assertNotNull(endpoint);
assertTrue(endpoint instanceof SjmsEndpoint);
SjmsEndpoint qe = (SjmsEndpoint) endpoint;
assertEquals(qe.getReplyTo(), replyTo);
assertEquals(ExchangePattern.InOut, qe.createExchange().getPattern());
}
@Test
public void testDefaultExchangePattern() {
try {
SjmsEndpoint sjms = (SjmsEndpoint) context.getEndpoint("sjms:queue:test.SjmsEndpointTest");
assertNotNull(sjms);
assertEquals(ExchangePattern.InOnly, sjms.getExchangePattern());
// assertTrue(sjms.createExchange().getPattern().equals(ExchangePattern.InOnly));
} catch (Exception e) {
fail("Exception thrown: " + e.getLocalizedMessage());
}
}
@Test
public void testInOnlyExchangePattern() {
try {
Endpoint sjms = context.getEndpoint("sjms:queue:test.SjmsEndpointTest?exchangePattern=" + ExchangePattern.InOnly);
assertNotNull(sjms);
assertEquals(ExchangePattern.InOnly, sjms.createExchange().getPattern());
} catch (Exception e) {
fail("Exception thrown: " + e.getLocalizedMessage());
}
}
@Test
public void testInOutExchangePattern() {
try {
Endpoint sjms = context.getEndpoint("sjms:queue:test.SjmsEndpointTest?exchangePattern=" + ExchangePattern.InOut);
assertNotNull(sjms);
assertEquals(ExchangePattern.InOut, sjms.createExchange().getPattern());
} catch (Exception e) {
fail("Exception thrown: " + e.getLocalizedMessage());
}
}
@Test
public void testUnsupportedMessageExchangePattern() {
assertThrows(ResolveEndpointFailedException.class,
() -> context.getEndpoint("sjms:queue:test2.SjmsEndpointTest?messageExchangePattern=OutOnly"));
}
@Test
public void testReplyToAndMEPMatch() {
String replyTo = "reply.to.queue";
Endpoint endpoint = context
.getEndpoint(
"sjms:queue:test.SjmsEndpointTest?replyTo=" + replyTo + "&exchangePattern=" + ExchangePattern.InOut);
assertNotNull(endpoint);
assertTrue(endpoint instanceof SjmsEndpoint);
SjmsEndpoint qe = (SjmsEndpoint) endpoint;
assertEquals(qe.getReplyTo(), replyTo);
assertEquals(ExchangePattern.InOut, qe.createExchange().getPattern());
}
@Test
public void testDestinationName() {
Endpoint endpoint = context.getEndpoint("sjms:queue:test.SjmsEndpointTest?synchronous=true");
assertNotNull(endpoint);
assertTrue(endpoint instanceof SjmsEndpoint);
SjmsEndpoint qe = (SjmsEndpoint) endpoint;
assertTrue(qe.isSynchronous());
}
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext camelContext = super.createCamelContext();
ActiveMQConnectionFactory connectionFactory
= new ActiveMQConnectionFactory("vm://broker?broker.persistent=false&broker.useJmx=false");
SjmsComponent component = new SjmsComponent();
component.setConnectionFactory(connectionFactory);
camelContext.addComponent("sjms", component);
return camelContext;
}
}
|
SjmsEndpointTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/IncorrectMainMethodTest.java
|
{
"start": 4740,
"end": 5059
}
|
class ____ {
static void main(Object[] args) {}
}
""")
.doTest();
}
@Test
public void removePrivate() {
assume().that(Runtime.version().feature()).isLessThan(25);
refactoringHelper
.addInputLines(
"Test.java",
"""
|
Test
|
java
|
google__jimfs
|
jimfs/src/main/java/com/google/common/jimfs/Java8Compatibility.java
|
{
"start": 800,
"end": 915
}
|
class ____ {
static void clear(Buffer b) {
b.clear();
}
private Java8Compatibility() {}
}
|
Java8Compatibility
|
java
|
junit-team__junit5
|
junit-vintage-engine/src/testFixtures/java/org/junit/vintage/engine/samples/junit4/JUnit4TestCaseWithRunnerWithDuplicateChangingChildDescriptions.java
|
{
"start": 665,
"end": 1704
}
|
class ____ extends org.junit.runner.Runner {
private final Class<?> testClass;
public Runner(Class<?> testClass) {
this.testClass = testClass;
}
@Override
public Description getDescription() {
var suiteDescription = Description.createSuiteDescription(testClass);
suiteDescription.addChild(getContainerDescription("1st"));
suiteDescription.addChild(getContainerDescription("2nd"));
return suiteDescription;
}
private Description getContainerDescription(String name) {
var parent = Description.createSuiteDescription(name);
parent.addChild(getLeafDescription());
parent.addChild(getLeafDescription());
return parent;
}
private Description getLeafDescription() {
return Description.createTestDescription(testClass, "leaf");
}
@Override
public void run(RunNotifier notifier) {
for (var i = 0; i < 2; i++) {
notifier.fireTestIgnored(getLeafDescription());
notifier.fireTestStarted(getLeafDescription());
notifier.fireTestFinished(getLeafDescription());
}
}
}
}
|
Runner
|
java
|
bumptech__glide
|
library/src/main/java/com/bumptech/glide/load/data/FileDescriptorAssetPathFetcher.java
|
{
"start": 285,
"end": 890
}
|
class ____ extends AssetPathFetcher<AssetFileDescriptor> {
public FileDescriptorAssetPathFetcher(AssetManager assetManager, String assetPath) {
super(assetManager, assetPath);
}
@Override
protected AssetFileDescriptor loadResource(AssetManager assetManager, String path)
throws IOException {
return assetManager.openFd(path);
}
@Override
protected void close(AssetFileDescriptor data) throws IOException {
data.close();
}
@NonNull
@Override
public Class<AssetFileDescriptor> getDataClass() {
return AssetFileDescriptor.class;
}
}
|
FileDescriptorAssetPathFetcher
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/web/servlet/samples/spr/CustomRequestAttributesRequestContextHolderTests.java
|
{
"start": 4650,
"end": 5406
}
|
class ____ {
@RequestMapping("/singletonController")
public void handle() {
assertRequestAttributes();
}
}
private static void assertRequestAttributes() {
RequestAttributes requestAttributes = RequestContextHolder.getRequestAttributes();
assertThat(requestAttributes).isInstanceOf(ServletRequestAttributes.class);
assertRequestAttributes(((ServletRequestAttributes) requestAttributes).getRequest());
}
private static void assertRequestAttributes(ServletRequest request) {
assertThat(request.getAttribute(FROM_CUSTOM_MOCK)).isNull();
assertThat(request.getAttribute(FROM_MVC_TEST_DEFAULT)).isEqualTo(FROM_MVC_TEST_DEFAULT);
assertThat(request.getAttribute(FROM_MVC_TEST_MOCK)).isEqualTo(FROM_MVC_TEST_MOCK);
}
}
|
SingletonController
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
|
{
"start": 2298,
"end": 11159
}
|
class ____ extends ReplicaInfo {
public static final Logger LOG =
LoggerFactory.getLogger(ProvidedReplica.class);
// Null checksum information for provided replicas.
// Shared across all replicas.
static final byte[] NULL_CHECKSUM_ARRAY =
FsDatasetUtil.createNullChecksumByteArray();
private URI fileURI;
private Path pathPrefix;
private String pathSuffix;
private long fileOffset;
private Configuration conf;
private PathHandle pathHandle;
private FileSystem remoteFS;
/**
* Constructor.
*
* @param blockId block id
* @param fileURI remote URI this block is to be read from
* @param fileOffset the offset in the remote URI
* @param blockLen the length of the block
* @param genStamp the generation stamp of the block
* @param volume the volume this block belongs to
* @param conf the configuration
* @param remoteFS reference to the remote filesystem to use for this replica.
*/
public ProvidedReplica(long blockId, URI fileURI, long fileOffset,
long blockLen, long genStamp, PathHandle pathHandle, FsVolumeSpi volume,
Configuration conf, FileSystem remoteFS) {
super(volume, blockId, blockLen, genStamp);
this.fileURI = fileURI;
this.fileOffset = fileOffset;
this.conf = conf;
this.pathHandle = pathHandle;
if (remoteFS != null) {
this.remoteFS = remoteFS;
} else {
LOG.warn(
"Creating an reference to the remote FS for provided block " + this);
try {
this.remoteFS = FileSystem.get(fileURI, this.conf);
} catch (IOException e) {
LOG.warn("Failed to obtain filesystem for " + fileURI);
this.remoteFS = null;
}
}
}
/**
* Constructor.
*
* @param blockId block id
* @param pathPrefix A prefix of the {@link Path} associated with this replica
* on the remote {@link FileSystem}.
* @param pathSuffix A suffix of the {@link Path} associated with this replica
* on the remote {@link FileSystem}. Resolving the {@code pathSuffix}
* against the {@code pathPrefix} should provide the exact
* {@link Path} of the data associated with this replica on the
* remote {@link FileSystem}.
* @param fileOffset the offset in the remote URI
* @param blockLen the length of the block
* @param genStamp the generation stamp of the block
* @param volume the volume this block belongs to
* @param conf the configuration
* @param remoteFS reference to the remote filesystem to use for this replica.
*/
public ProvidedReplica(long blockId, Path pathPrefix, String pathSuffix,
long fileOffset, long blockLen, long genStamp, PathHandle pathHandle,
FsVolumeSpi volume, Configuration conf, FileSystem remoteFS) {
super(volume, blockId, blockLen, genStamp);
this.fileURI = null;
this.pathPrefix = pathPrefix;
this.pathSuffix = pathSuffix;
this.fileOffset = fileOffset;
this.conf = conf;
this.pathHandle = pathHandle;
if (remoteFS != null) {
this.remoteFS = remoteFS;
} else {
LOG.warn(
"Creating an reference to the remote FS for provided block " + this);
try {
this.remoteFS = FileSystem.get(pathPrefix.toUri(), this.conf);
} catch (IOException e) {
LOG.warn("Failed to obtain filesystem for " + pathPrefix);
this.remoteFS = null;
}
}
}
public ProvidedReplica(ProvidedReplica r) {
super(r);
this.fileURI = r.fileURI;
this.fileOffset = r.fileOffset;
this.conf = r.conf;
this.remoteFS = r.remoteFS;
this.pathHandle = r.pathHandle;
this.pathPrefix = r.pathPrefix;
this.pathSuffix = r.pathSuffix;
}
@Override
public URI getBlockURI() {
return getRemoteURI();
}
@VisibleForTesting
public String getPathSuffix() {
return pathSuffix;
}
@VisibleForTesting
public Path getPathPrefix() {
return pathPrefix;
}
private URI getRemoteURI() {
if (fileURI != null) {
return fileURI;
} else if (pathPrefix == null) {
return new Path(pathSuffix).toUri();
} else {
return new Path(pathPrefix, pathSuffix).toUri();
}
}
@Override
public InputStream getDataInputStream(long seekOffset) throws IOException {
if (remoteFS != null) {
FSDataInputStream ins;
try {
if (pathHandle != null) {
ins = remoteFS.open(pathHandle, conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
IO_FILE_BUFFER_SIZE_DEFAULT));
} else {
ins = remoteFS.open(new Path(getRemoteURI()));
}
} catch (UnsupportedOperationException e) {
throw new IOException("PathHandle specified, but unsuported", e);
}
ins.seek(fileOffset + seekOffset);
return new BoundedInputStream(
new FSDataInputStream(ins), getBlockDataLength());
} else {
throw new IOException("Remote filesystem for provided replica " + this +
" does not exist");
}
}
@Override
public OutputStream getDataOutputStream(boolean append) throws IOException {
throw new UnsupportedOperationException(
"OutputDataStream is not implemented for ProvidedReplica");
}
@Override
public URI getMetadataURI() {
return null;
}
@Override
public OutputStream getMetadataOutputStream(boolean append)
throws IOException {
return null;
}
@Override
public boolean blockDataExists() {
if(remoteFS != null) {
try {
return remoteFS.exists(new Path(getRemoteURI()));
} catch (IOException e) {
return false;
}
} else {
return false;
}
}
@Override
public boolean deleteBlockData() {
throw new UnsupportedOperationException(
"ProvidedReplica does not support deleting block data");
}
@Override
public long getBlockDataLength() {
return this.getNumBytes();
}
@Override
public LengthInputStream getMetadataInputStream(long offset)
throws IOException {
return new LengthInputStream(new ByteArrayInputStream(NULL_CHECKSUM_ARRAY),
NULL_CHECKSUM_ARRAY.length);
}
@Override
public boolean metadataExists() {
return NULL_CHECKSUM_ARRAY == null ? false : true;
}
@Override
public boolean deleteMetadata() {
throw new UnsupportedOperationException(
"ProvidedReplica does not support deleting metadata");
}
@Override
public long getMetadataLength() {
return NULL_CHECKSUM_ARRAY == null ? 0 : NULL_CHECKSUM_ARRAY.length;
}
@Override
public boolean renameMeta(URI destURI) throws IOException {
throw new UnsupportedOperationException(
"ProvidedReplica does not support renaming metadata");
}
@Override
public boolean renameData(URI destURI) throws IOException {
throw new UnsupportedOperationException(
"ProvidedReplica does not support renaming data");
}
@Override
public boolean getPinning(LocalFileSystem localFS) throws IOException {
return false;
}
@Override
public void setPinning(LocalFileSystem localFS) throws IOException {
throw new UnsupportedOperationException(
"ProvidedReplica does not support pinning");
}
@Override
public void bumpReplicaGS(long newGS) throws IOException {
throw new UnsupportedOperationException(
"ProvidedReplica does not yet support writes");
}
@Override
public boolean breakHardLinksIfNeeded() throws IOException {
return false;
}
@Override
public ReplicaRecoveryInfo createInfo()
throws UnsupportedOperationException {
throw new UnsupportedOperationException(
"ProvidedReplica does not yet support writes");
}
@Override
public int compareWith(ScanInfo info) {
if (info.getFileRegion().equals(
new FileRegion(this.getBlockId(), new Path(getRemoteURI()),
fileOffset, this.getNumBytes(), this.getGenerationStamp()))) {
return 0;
} else {
return (int) (info.getBlockLength() - getNumBytes());
}
}
@Override
public void truncateBlock(long newLength) throws IOException {
throw new UnsupportedOperationException(
"ProvidedReplica does not yet support truncate");
}
@Override
public void updateWithReplica(StorageLocation replicaLocation) {
throw new UnsupportedOperationException(
"ProvidedReplica does not yet support update");
}
@Override
public void copyMetadata(URI destination) throws IOException {
throw new UnsupportedOperationException(
"ProvidedReplica does not yet support copy metadata");
}
@Override
public void copyBlockdata(URI destination) throws IOException {
throw new UnsupportedOperationException(
"ProvidedReplica does not yet support copy data");
}
@VisibleForTesting
public void setPathHandle(PathHandle pathHandle) {
this.pathHandle = pathHandle;
}
}
|
ProvidedReplica
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/StreamResourceLeakTest.java
|
{
"start": 7120,
"end": 7722
}
|
class ____ {
@MustBeClosed
Stream<String> f(Path p) throws IOException {
return Files.lines(p);
}
}
""")
.doTest();
}
@Test
public void returnFromMustBeClosedMethodWithChaining() {
testHelper
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.annotations.MustBeClosed;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Stream;
|
Test
|
java
|
micronaut-projects__micronaut-core
|
management/src/main/java/io/micronaut/management/endpoint/loggers/impl/DefaultLoggersManager.java
|
{
"start": 1507,
"end": 3471
}
|
class ____ implements LoggersManager<Map<String, Object>> {
private static final String LEVELS = "levels";
private static final String LOGGERS = "loggers";
@Override
public Publisher<Map<String, Object>> getLoggers(ManagedLoggingSystem loggingSystem) {
Map<String, Object> data = new LinkedHashMap<>(2);
data.put(LEVELS, getLogLevels());
data.put(LOGGERS, getLoggerData(loggingSystem.getLoggers()));
return Flux.just(data);
}
@Override
public Publisher<Map<String, Object>> getLogger(ManagedLoggingSystem loggingSystem,
String name) {
return Flux.just(getLoggerData(loggingSystem.getLogger(name)));
}
@Override
public void setLogLevel(ManagedLoggingSystem loggingSystem, @NotBlank String name, @NotNull io.micronaut.logging.LogLevel level) {
loggingSystem.setLogLevel(name, level);
}
/**
* @param configurations The logger configurations
* @return A Map from logger name to logger configuration data
*/
private static Map<String, Object> getLoggerData(
Collection<LoggerConfiguration> configurations) {
return configurations
.stream()
.collect(Collectors.toMap(
LoggerConfiguration::getName,
LoggerConfiguration::getData,
(l1, l2) -> l1,
LinkedHashMap::new));
}
/**
* @param configuration The logger configuration
* @return The logger configuration data
*/
private static Map<String, Object> getLoggerData(
LoggerConfiguration configuration) {
return configuration.getData();
}
/**
* @return A list with all {@link io.micronaut.logging.LogLevel} values
*/
private static List<io.micronaut.logging.LogLevel> getLogLevels() {
return Arrays.asList(io.micronaut.logging.LogLevel.values());
}
}
|
DefaultLoggersManager
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/ExtendedBeanInfoTests.java
|
{
"start": 17747,
"end": 19333
}
|
class ____ {
// NON-VOID-RETURNING, NON-INDEXED write method
public C setFoos(String[] foos) { return this; }
// indexed read method
public String getFoos(int i) { return null; }
}
BeanInfo bi = Introspector.getBeanInfo(C.class);
assertThat(hasReadMethodForProperty(bi, "foos")).isFalse();
assertThat(hasIndexedReadMethodForProperty(bi, "foos")).isTrue();
assertThat(hasWriteMethodForProperty(bi, "foos")).isFalse();
assertThat(hasIndexedWriteMethodForProperty(bi, "foos")).isFalse();
BeanInfo ebi = new ExtendedBeanInfo(Introspector.getBeanInfo(C.class));
assertThat(hasReadMethodForProperty(ebi, "foos")).isFalse();
assertThat(hasIndexedReadMethodForProperty(ebi, "foos")).isTrue();
assertThat(hasWriteMethodForProperty(ebi, "foos")).isTrue();
assertThat(hasIndexedWriteMethodForProperty(ebi, "foos")).isFalse();
}
}
/**
* Prior to SPR-10111 (a follow-up fix for SPR-9702), this method would throw an
* IntrospectionException regarding a "type mismatch between indexed and non-indexed
* methods" intermittently (approximately one out of every four times) under JDK 7
* due to non-deterministic results from {@link Class#getDeclaredMethods()}.
* @see <a href="https://bugs.java.com/bugdatabase/view_bug.do?bug_id=7023180">JDK-7023180</a>
* @see #cornerSpr9702()
*/
@Test
void cornerSpr10111() {
assertThatNoException().isThrownBy(() -> new ExtendedBeanInfo(Introspector.getBeanInfo(BigDecimal.class)));
}
@Test
void subclassWriteMethodWithCovariantReturnType() throws Exception {
@SuppressWarnings("unused")
|
C
|
java
|
quarkusio__quarkus
|
integration-tests/oidc-wiremock/src/main/java/io/quarkus/it/keycloak/SaluterServiceImpl.java
|
{
"start": 373,
"end": 802
}
|
class ____ extends MutinySaluterGrpc.SaluterImplBase {
@Inject
SecurityIdentity securityIdentity;
@Override
public Uni<SaluteReply> bearer(SaluteRequest request) {
var principalName = securityIdentity.getPrincipal().getName();
return Uni.createFrom().item(SaluteReply.newBuilder()
.setMessage("Hi " + request.getName() + " from " + principalName).build());
}
}
|
SaluterServiceImpl
|
java
|
micronaut-projects__micronaut-core
|
http/src/test/java/io/micronaut/http/cookie/DefaultClientCookieEncoderTest.java
|
{
"start": 198,
"end": 705
}
|
class ____ {
@Test
void clientCookieEncoderIsDefaultClientCookieEncoder() {
assertInstanceOf(DefaultClientCookieEncoder.class, ClientCookieEncoder.INSTANCE);
}
@Test
void clientCookieEncoding() {
ClientCookieEncoder cookieEncoder = new DefaultClientCookieEncoder();
Cookie cookie = Cookie.of("SID", "31d4d96e407aad42").path("/").domain("example.com");
assertEquals("SID=31d4d96e407aad42", cookieEncoder.encode(cookie));
}
}
|
DefaultClientCookieEncoderTest
|
java
|
square__moshi
|
moshi/src/test/java/com/squareup/moshi/AdapterMethodsTest.java
|
{
"start": 27156,
"end": 27997
}
|
class ____<T> {
final T data;
public Box(T data) {
this.data = data;
}
@Override
public boolean equals(Object o) {
return o instanceof Box && ((Box) o).data.equals(data);
}
@Override
public int hashCode() {
return data.hashCode();
}
}
@Test
public void genericArrayTypes() throws Exception {
Moshi moshi = new Moshi.Builder().add(new ByteArrayJsonAdapter()).build();
JsonAdapter<MapOfByteArrays> jsonAdapter = moshi.adapter(MapOfByteArrays.class);
MapOfByteArrays mapOfByteArrays =
new MapOfByteArrays(Collections.singletonMap("a", new byte[] {0, -1}));
String json = "{\"map\":{\"a\":\"00ff\"}}";
assertThat(jsonAdapter.toJson(mapOfByteArrays)).isEqualTo(json);
assertThat(jsonAdapter.fromJson(json)).isEqualTo(mapOfByteArrays);
}
static
|
Box
|
java
|
quarkusio__quarkus
|
integration-tests/smallrye-config/src/test/java/io/quarkus/it/smallrye/config/User.java
|
{
"start": 301,
"end": 1576
}
|
class ____ {
@Id
@GeneratedValue(generator = "system-uuid")
@GenericGenerator(name = "system-uuid", strategy = "uuid")
private String id;
private String firstName;
private String lastName;
private Integer age;
public String getId() {
return id;
}
public User setId(final String id) {
this.id = id;
return this;
}
public String getFirstName() {
return firstName;
}
public User setFirstName(final String firstName) {
this.firstName = firstName;
return this;
}
public String getLastName() {
return lastName;
}
public User setLastName(final String lastName) {
this.lastName = lastName;
return this;
}
public Integer getAge() {
return age;
}
public User setAge(final Integer age) {
this.age = age;
return this;
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final User user = (User) o;
return id.equals(user.id);
}
@Override
public int hashCode() {
return Objects.hash(id);
}
}
|
User
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationUpdateResponseInfo.java
|
{
"start": 1210,
"end": 1300
}
|
class ____ {
public ReservationUpdateResponseInfo() {
}
}
|
ReservationUpdateResponseInfo
|
java
|
google__guava
|
guava-testlib/test/com/google/common/testing/ClassSanityTesterTest.java
|
{
"start": 36826,
"end": 36979
}
|
class ____ {
@Keep
public AnAbstractClass(String s) {}
@Keep
public void failsToCheckNull(String s) {}
}
private static
|
AnAbstractClass
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/component/rest/FromRestPathPlaceholderTest.java
|
{
"start": 1191,
"end": 2497
}
|
class ____ extends ContextTestSupport {
@Override
protected Registry createCamelRegistry() throws Exception {
Registry jndi = super.createCamelRegistry();
jndi.bind("dummy-rest", new DummyRestConsumerFactory());
return jndi;
}
protected int getExpectedNumberOfRoutes() {
// routes are inlined
return 1;
}
@Test
public void testPlaceholder() {
assertEquals(getExpectedNumberOfRoutes(), context.getRoutes().size());
RestDefinition rest = context.getRestDefinitions().get(0);
assertNotNull(rest);
assertEquals("/say/{{mypath}}", rest.getPath());
// placeholder should be resolved, so we can find the rest endpoint that is a dummy (via seda)
assertNotNull(context.hasEndpoint("seda://get-say-hello"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
context.getPropertiesComponent().addInitialProperty("mypath", "hello");
restConfiguration().host("localhost");
rest("/say/{{mypath}}").get().to("direct:hello");
from("direct:hello").log("Hello");
}
};
}
}
|
FromRestPathPlaceholderTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java
|
{
"start": 5165,
"end": 6777
}
|
class ____ extends DenseVectorFunction {
protected final float[] queryVector;
/**
* Constructs a dense vector function used for float vectors.
*
* @param scoreScript The script in which this function was referenced.
* @param field The vector field.
* @param queryVector The query vector.
* @param normalizeQuery Whether the provided query should be normalized to unit length.
*/
public FloatDenseVectorFunction(
ScoreScript scoreScript,
DenseVectorDocValuesField field,
List<Number> queryVector,
boolean normalizeQuery
) {
super(scoreScript, field);
DenseVector.checkDimensions(field.get().getDims(), queryVector.size());
this.queryVector = new float[queryVector.size()];
double queryMagnitude = 0.0;
for (int i = 0; i < queryVector.size(); i++) {
float value = queryVector.get(i).floatValue();
this.queryVector[i] = value;
queryMagnitude += value * value;
}
queryMagnitude = Math.sqrt(queryMagnitude);
field.getElement().checkVectorBounds(this.queryVector);
if (normalizeQuery) {
for (int dim = 0; dim < this.queryVector.length; dim++) {
this.queryVector[dim] /= (float) queryMagnitude;
}
}
}
}
// Calculate l1 norm (Manhattan distance) between a query's dense vector and documents' dense vectors
public
|
FloatDenseVectorFunction
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/qa/jdbc/security/src/test/java/org/elasticsearch/xpack/sql/qa/jdbc/security/JdbcSimpleExampleIT.java
|
{
"start": 463,
"end": 988
}
|
class ____ extends SimpleExampleTestCase {
@Override
protected Settings restClientSettings() {
return JdbcConnectionIT.securitySettings();
}
@Override
protected String getProtocol() {
return JdbcConnectionIT.SSL_ENABLED ? "https" : "http";
}
@Override
protected Properties connectionProperties() {
Properties properties = super.connectionProperties();
properties.putAll(JdbcSecurityUtils.adminProperties());
return properties;
}
}
|
JdbcSimpleExampleIT
|
java
|
spring-projects__spring-boot
|
module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/endpoint/Producible.java
|
{
"start": 1996,
"end": 2441
}
|
enum ____ should be used as the default value when an accept header
* of */* is provided, or if the {@code Accept} header is missing. Only
* one value can be marked as default. If no value is marked, then the value with the
* highest {@link Enum#ordinal() ordinal} is used as the default.
* @return if this value should be used as the default value
* @since 2.5.6
*/
default boolean isDefault() {
return false;
}
}
|
value
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/proxy/concrete/ConcreteProxyTest.java
|
{
"start": 13716,
"end": 14003
}
|
class ____ extends SingleBase {
private Integer child2Prop;
public SingleChild2() {
}
public SingleChild2(Long id, Integer child2Prop) {
super( id );
this.child2Prop = child2Prop;
}
}
// InheritanceType.JOINED
@Entity(name = "JoinedParent")
public static
|
SingleChild2
|
java
|
google__guice
|
extensions/persist/src/com/google/inject/persist/jpa/JpaFinderProxy.java
|
{
"start": 1494,
"end": 3429
}
|
class ____ implements MethodInterceptor {
private final Map<Method, FinderDescriptor> finderCache = new MapMaker().weakKeys().makeMap();
private final Provider<EntityManager> emProvider;
@Inject
public JpaFinderProxy(Provider<EntityManager> emProvider) {
this.emProvider = emProvider;
}
@Override
public Object invoke(MethodInvocation methodInvocation) throws Throwable {
EntityManager em = emProvider.get();
//obtain a cached finder descriptor (or create a new one)
JpaFinderProxy.FinderDescriptor finderDescriptor = getFinderDescriptor(methodInvocation);
Object result = null;
//execute as query (named params or otherwise)
Query jpaQuery = finderDescriptor.createQuery(em);
if (finderDescriptor.isBindAsRawParameters) {
bindQueryRawParameters(jpaQuery, finderDescriptor, methodInvocation.getArguments());
} else {
bindQueryNamedParameters(jpaQuery, finderDescriptor, methodInvocation.getArguments());
}
//depending upon return type, decorate or return the result as is
if (JpaFinderProxy.ReturnType.PLAIN.equals(finderDescriptor.returnType)) {
result = jpaQuery.getSingleResult();
} else if (JpaFinderProxy.ReturnType.COLLECTION.equals(finderDescriptor.returnType)) {
result = getAsCollection(finderDescriptor, jpaQuery.getResultList());
} else if (JpaFinderProxy.ReturnType.ARRAY.equals(finderDescriptor.returnType)) {
result = jpaQuery.getResultList().toArray();
}
return result;
}
@SuppressWarnings({"unchecked", "rawtypes"}) // JPA Query returns raw type.
private Object getAsCollection(JpaFinderProxy.FinderDescriptor finderDescriptor, List results) {
Collection<?> collection;
try {
collection = (Collection) finderDescriptor.returnCollectionTypeConstructor.newInstance();
} catch (InstantiationException e) {
throw new RuntimeException(
"Specified collection
|
JpaFinderProxy
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/runtime/GeoPointScriptFieldDistanceFeatureQueryTests.java
|
{
"start": 1921,
"end": 7381
}
|
class ____ extends AbstractScriptFieldQueryTestCase<
GeoPointScriptFieldDistanceFeatureQuery> {
private final Function<LeafReaderContext, AbstractLongFieldScript> leafFactory = ctx -> null;
@Override
protected GeoPointScriptFieldDistanceFeatureQuery createTestInstance() {
double lat = GeoTestUtil.nextLatitude();
double lon = GeoTestUtil.nextLongitude();
double pivot = randomDouble() * GeoUtils.EARTH_EQUATOR;
return new GeoPointScriptFieldDistanceFeatureQuery(randomScript(), leafFactory, randomAlphaOfLength(5), lat, lon, pivot);
}
@Override
protected GeoPointScriptFieldDistanceFeatureQuery copy(GeoPointScriptFieldDistanceFeatureQuery orig) {
return new GeoPointScriptFieldDistanceFeatureQuery(
orig.script(),
leafFactory,
orig.fieldName(),
orig.lat(),
orig.lon(),
orig.pivot()
);
}
@Override
protected GeoPointScriptFieldDistanceFeatureQuery mutate(GeoPointScriptFieldDistanceFeatureQuery orig) {
Script script = orig.script();
String fieldName = orig.fieldName();
double lat = orig.lat();
double lon = orig.lon();
double pivot = orig.pivot();
switch (randomInt(4)) {
case 0 -> script = randomValueOtherThan(script, this::randomScript);
case 1 -> fieldName += "modified";
case 2 -> lat = randomValueOtherThan(lat, GeoTestUtil::nextLatitude);
case 3 -> lon = randomValueOtherThan(lon, GeoTestUtil::nextLongitude);
case 4 -> pivot = randomValueOtherThan(pivot, () -> randomDouble() * GeoUtils.EARTH_EQUATOR);
default -> fail();
}
return new GeoPointScriptFieldDistanceFeatureQuery(script, leafFactory, fieldName, lat, lon, pivot);
}
@Override
public void testMatches() throws IOException {
IndexWriterConfig config = LuceneTestCase.newIndexWriterConfig(random(), new MockAnalyzer(random()));
// Use LogDocMergePolicy to avoid randomization issues with the doc retrieval order.
config.setMergePolicy(new LogDocMergePolicy());
try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory, config)) {
iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"location\": [34, 6]}"))));
iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"location\": [-3.56, -45.98]}"))));
try (DirectoryReader reader = iw.getReader()) {
IndexSearcher searcher = newSearcher(reader);
SearchLookup searchLookup = new SearchLookup(
null,
null,
SourceProvider.fromLookup(MappingLookup.EMPTY, null, SourceFieldMetrics.NOOP)
);
Function<LeafReaderContext, GeoPointFieldScript> leafFactory = ctx -> new GeoPointFieldScript(
"test",
Map.of(),
searchLookup,
OnScriptError.FAIL,
ctx
) {
@Override
@SuppressWarnings("unchecked")
public void execute() {
Map<String, Object> source = (Map<String, Object>) this.getParams().get("_source");
GeoPoint point = GeoUtils.parseGeoPoint(source.get("location"), true);
emit(point.lat(), point.lon());
}
};
GeoPointScriptFieldDistanceFeatureQuery query = new GeoPointScriptFieldDistanceFeatureQuery(
randomScript(),
GeoPointScriptFieldType.valuesEncodedAsLong(searchLookup, "test", leafFactory),
"test",
0,
0,
30000
);
TopDocs td = searcher.search(query, 2);
assertThat(td.scoreDocs[0].score, equalTo(0.0077678584F));
assertThat(td.scoreDocs[0].doc, equalTo(0));
assertThat(td.scoreDocs[1].score, equalTo(0.005820022F));
assertThat(td.scoreDocs[1].doc, equalTo(1));
}
}
}
public void testMaxScore() throws IOException {
try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
iw.addDocument(List.of());
try (DirectoryReader reader = iw.getReader()) {
IndexSearcher searcher = newSearcher(reader);
GeoPointScriptFieldDistanceFeatureQuery query = createTestInstance();
float boost = randomFloat();
assertThat(
query.createWeight(searcher, ScoreMode.COMPLETE, boost).scorer(reader.leaves().get(0)).getMaxScore(randomInt()),
equalTo(boost)
);
}
}
}
@Override
protected void assertToString(GeoPointScriptFieldDistanceFeatureQuery query) {
assertThat(
query.toString(query.fieldName()),
equalTo("GeoPointScriptFieldDistanceFeatureQuery(lat=" + query.lat() + ",lon=" + query.lon() + ",pivot=" + query.pivot() + ")")
);
}
@Override
public final void testVisit() {
assertEmptyVisit();
}
}
|
GeoPointScriptFieldDistanceFeatureQueryTests
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/utils/Utils.java
|
{
"start": 61571,
"end": 62111
}
|
enum ____; never null, but may be empty
* if there are no values defined for the enum
*/
public static String[] enumOptions(Class<? extends Enum<?>> enumClass) {
Objects.requireNonNull(enumClass);
if (!enumClass.isEnum()) {
throw new IllegalArgumentException("Class " + enumClass + " is not an enumerable type");
}
return Stream.of(enumClass.getEnumConstants())
.map(Object::toString)
.toArray(String[]::new);
}
/**
* Ensure that the
|
class
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/iterable/IterableAssert_anyMatch_Test.java
|
{
"start": 943,
"end": 1434
}
|
class ____ extends IterableAssertBaseTest {
private Predicate<Object> predicate;
@BeforeEach
void beforeEach() {
predicate = o -> o != null;
}
@Override
protected ConcreteIterableAssert<Object> invoke_api_method() {
return assertions.anyMatch(predicate);
}
@Override
protected void verify_internal_effects() {
verify(iterables).assertAnyMatch(getInfo(assertions), getActual(assertions), predicate, PredicateDescription.GIVEN);
}
}
|
IterableAssert_anyMatch_Test
|
java
|
apache__spark
|
common/network-common/src/main/java/org/apache/spark/network/util/TimerWithCustomTimeUnit.java
|
{
"start": 1748,
"end": 2565
}
|
class ____ extends Timer {
private final TimeUnit timeUnit;
private final double nanosPerUnit;
public TimerWithCustomTimeUnit(TimeUnit timeUnit) {
this(timeUnit, Clock.defaultClock());
}
TimerWithCustomTimeUnit(TimeUnit timeUnit, Clock clock) {
super(new ExponentiallyDecayingReservoir(), clock);
this.timeUnit = timeUnit;
this.nanosPerUnit = timeUnit.toNanos(1);
}
@Override
public Snapshot getSnapshot() {
return new SnapshotWithCustomTimeUnit(super.getSnapshot());
}
private double toUnit(double nanos) {
// TimeUnit.convert() truncates (loses precision), so floating-point division is used instead
return nanos / nanosPerUnit;
}
private long toUnit(long nanos) {
return timeUnit.convert(nanos, TimeUnit.NANOSECONDS);
}
private
|
TimerWithCustomTimeUnit
|
java
|
quarkusio__quarkus
|
extensions/funqy/funqy-knative-events/runtime/src/main/java/io/quarkus/funqy/runtime/bindings/knative/events/HeaderCloudEventImpl.java
|
{
"start": 598,
"end": 5083
}
|
class ____<T> extends AbstractCloudEvent<T> implements CloudEvent<T> {
String id;
String specVersion;
String source;
String type;
String subject;
OffsetDateTime time;
Map<String, String> extensions;
String dataSchema;
String dataContentType;
T data;
final MultiMap headers;
final Buffer buffer;
final Type dataType;
final ObjectMapper mapper;
private ObjectReader reader;
HeaderCloudEventImpl(MultiMap headers, Buffer buffer, Type dataType, ObjectMapper mapper, ObjectReader reader) {
this.headers = headers;
this.buffer = buffer;
this.dataType = dataType;
this.mapper = mapper;
this.reader = reader;
}
@Override
public String id() {
if (id == null) {
id = headers.get("ce-id");
}
return id;
}
@Override
public String specVersion() {
if (specVersion == null) {
String sv = headers.get("ce-specversion");
if (sv != null) {
this.specVersion = sv;
}
}
return specVersion;
}
@Override
public String source() {
if (source == null && headers.contains("ce-source")) {
source = headers.get("ce-source");
}
return source;
}
@Override
public String type() {
if (type == null) {
type = headers.get("ce-type");
}
return type;
}
@Override
public String subject() {
if (subject == null) {
subject = headers.get("ce-subject");
}
return subject;
}
@Override
public OffsetDateTime time() {
if (time == null) {
String t = headers.get("ce-time");
if (t != null) {
time = OffsetDateTime.parse(t);
}
}
return time;
}
private static final Set<String> reservedHeaders;
static {
Set<String> ra = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
ra.add("ce-specversion");
ra.add("ce-id");
ra.add("ce-source");
ra.add("ce-type");
ra.add("Content-Type");
ra.add("ce-subject");
ra.add("ce-time");
ra.add("ce-datacontentencoding");
ra.add("ce-schemaurl");
ra.add("ce-dataschema");
reservedHeaders = Collections.unmodifiableSet(ra);
}
private static boolean isCEHeader(String value) {
return (value.charAt(0) == 'C' || value.charAt(0) == 'c') &&
(value.charAt(1) == 'E' || value.charAt(1) == 'e') &&
value.charAt(2) == '-';
}
@Override
public Map<String, String> extensions() {
if (extensions == null) {
extensions = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
for (Map.Entry<String, String> entry : headers) {
if (isCEHeader(entry.getKey()) && !reservedHeaders.contains(entry.getKey())) {
extensions.put(entry.getKey().substring(3), entry.getValue());
}
}
extensions = Collections.unmodifiableMap(extensions);
}
return extensions;
}
@Override
public String dataSchema() {
if (dataSchema == null) {
String dsName = majorSpecVersion() == 0 ? "ce-schemaurl" : "ce-dataschema";
dataSchema = headers.get(dsName);
}
return dataSchema;
}
@Override
public String dataContentType() {
if (dataContentType == null) {
dataContentType = headers.get("Content-Type");
}
return dataContentType;
}
@Override
public T data() {
if (data != null) {
return data;
}
if (dataContentType() != null && dataContentType().startsWith("application/json") && !byte[].class.equals(dataType)) {
try {
data = reader.readValue(buffer.getBytes());
return data;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
} else if (byte[].class.equals(dataType)) {
data = (T) buffer.getBytes();
return data;
} else {
String msg = String.format("Don't know how to get event data (dataContentType: '%s', javaType: '%s').",
dataContentType(), dataType.getTypeName());
throw new RuntimeException(msg);
}
}
}
|
HeaderCloudEventImpl
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java
|
{
"start": 16714,
"end": 20175
}
|
class ____ {
public static InferenceServiceConfiguration get() {
return configuration.getOrCompute();
}
private static final LazyInitializable<InferenceServiceConfiguration, RuntimeException> configuration = new LazyInitializable<>(
() -> {
var configurationMap = new HashMap<String, SettingsConfiguration>();
configurationMap.put(
SERVICE_ID,
new SettingsConfiguration.Builder(supportedTaskTypes).setDescription(
"The name of the model service to use for the {infer} task."
)
.setLabel("Project ID")
.setRequired(true)
.setSensitive(false)
.setUpdatable(false)
.setType(SettingsConfigurationFieldType.STRING)
.build()
);
configurationMap.put(
HOST,
new SettingsConfiguration.Builder(supportedTaskTypes).setDescription(
"The name of the host address used for the {infer} task. You can find the host address at "
+ "https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key[ the API keys section] "
+ "of the documentation."
)
.setLabel("Host")
.setRequired(true)
.setSensitive(false)
.setUpdatable(false)
.setType(SettingsConfigurationFieldType.STRING)
.build()
);
configurationMap.put(
HTTP_SCHEMA_NAME,
new SettingsConfiguration.Builder(supportedTaskTypes).setDescription("")
.setLabel("HTTP Schema")
.setRequired(false)
.setSensitive(false)
.setUpdatable(false)
.setType(SettingsConfigurationFieldType.STRING)
.build()
);
configurationMap.put(
WORKSPACE_NAME,
new SettingsConfiguration.Builder(supportedTaskTypes).setDescription(
"The name of the workspace used for the {infer} task."
)
.setLabel("Workspace")
.setRequired(true)
.setSensitive(false)
.setUpdatable(false)
.setType(SettingsConfigurationFieldType.STRING)
.build()
);
configurationMap.putAll(
DefaultSecretSettings.toSettingsConfigurationWithDescription(
"A valid API key for the AlibabaCloud AI Search API.",
supportedTaskTypes
)
);
configurationMap.putAll(RateLimitSettings.toSettingsConfiguration(supportedTaskTypes));
return new InferenceServiceConfiguration.Builder().setService(NAME)
.setName(SERVICE_NAME)
.setTaskTypes(supportedTaskTypes)
.setConfigurations(configurationMap)
.build();
}
);
}
}
|
Configuration
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerMetrics.java
|
{
"start": 1117,
"end": 1894
}
|
class ____ {
public final SenderMetricsRegistry senderMetrics;
public ProducerMetrics(Metrics metrics) {
this.senderMetrics = new SenderMetricsRegistry(metrics);
}
private List<MetricNameTemplate> getAllTemplates() {
return new ArrayList<>(this.senderMetrics.allTemplates());
}
public static void main(String[] args) {
Map<String, String> metricTags = Collections.singletonMap("client-id", "client-id");
MetricConfig metricConfig = new MetricConfig().tags(metricTags);
Metrics metrics = new Metrics(metricConfig);
ProducerMetrics metricsRegistry = new ProducerMetrics(metrics);
System.out.println(Metrics.toHtmlTable("kafka.producer", metricsRegistry.getAllTemplates()));
}
}
|
ProducerMetrics
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java
|
{
"start": 2346,
"end": 5814
}
|
class ____ implements HealthIndicatorService {
public static final String NAME = "repository_integrity";
private static final String HELP_URL = "https://ela.st/fix-repository-integrity";
public static final String NO_REPOS_CONFIGURED = "No snapshot repositories configured.";
public static final String ALL_REPOS_HEALTHY = "All repositories are healthy.";
public static final String NO_REPO_HEALTH_INFO = "No repository health info.";
public static final String MIXED_VERSIONS =
"No repository health info. The cluster currently has mixed versions (an upgrade may be in progress).";
public static final List<HealthIndicatorImpact> IMPACTS = List.of(
new HealthIndicatorImpact(
NAME,
"backups_at_risk",
2,
"Data in the affected snapshot repositories may be lost and cannot be restored.",
List.of(ImpactArea.BACKUP)
)
);
public static final Diagnosis.Definition CORRUPTED_DEFINITION = new Diagnosis.Definition(
NAME,
"corrupt_repo_integrity",
"Multiple clusters are writing to the same repository.",
"Remove the repository from the other cluster(s), or mark it as read-only in the other cluster(s), and then re-add the repository"
+ " to this cluster.",
HELP_URL
);
public static final Diagnosis.Definition UNKNOWN_DEFINITION = new Diagnosis.Definition(
NAME,
"unknown_repository",
"The repository uses an unknown type.",
"Ensure that all required plugins are installed on the affected nodes.",
HELP_URL
);
public static final Diagnosis.Definition INVALID_DEFINITION = new Diagnosis.Definition(
NAME,
"invalid_repository",
"An exception occurred while trying to initialize the repository.",
"""
Make sure all nodes in the cluster are in sync with each other.\
Refer to the nodes’ logs for detailed information on why the repository initialization failed.""",
HELP_URL
);
private final ClusterService clusterService;
public RepositoryIntegrityHealthIndicatorService(ClusterService clusterService) {
this.clusterService = clusterService;
}
@Override
public String name() {
return NAME;
}
@Override
public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) {
var clusterState = clusterService.state();
var snapshotMetadata = RepositoriesMetadata.get(clusterService.state());
var repositories = snapshotMetadata.repositories();
if (repositories.isEmpty()) {
return createIndicator(GREEN, NO_REPOS_CONFIGURED, HealthIndicatorDetails.EMPTY, List.of(), List.of());
}
var repositoryHealthAnalyzer = new RepositoryHealthAnalyzer(clusterState, repositories, healthInfo.repositoriesInfoByNode());
return createIndicator(
repositoryHealthAnalyzer.getHealthStatus(),
repositoryHealthAnalyzer.getSymptom(),
repositoryHealthAnalyzer.getDetails(verbose),
repositoryHealthAnalyzer.getImpacts(),
repositoryHealthAnalyzer.getDiagnoses(verbose, maxAffectedResourcesCount)
);
}
/**
* Analyzer for the cluster's repositories health; aids in constructing a {@link HealthIndicatorResult}.
*/
|
RepositoryIntegrityHealthIndicatorService
|
java
|
apache__camel
|
components/camel-dynamic-router/src/test/java/org/apache/camel/component/dynamicrouter/filter/DynamicRouterFilterServiceTest.java
|
{
"start": 1797,
"end": 13496
}
|
class ____ {
static final String DYNAMIC_ROUTER_CHANNEL = "test";
@Mock
PrioritizedFilter prioritizedFilter;
@Mock
PrioritizedFilterStatistics prioritizedFilterStatistics;
@Mock
Exchange exchange;
@Mock
Message message;
@Mock
Predicate predicate;
PrioritizedFilterFactory prioritizedFilterFactory;
DynamicRouterFilterService filterService;
@BeforeEach
void setup() {
prioritizedFilterFactory = new PrioritizedFilterFactory() {
@Override
public PrioritizedFilter getInstance(
String id, int priority, Predicate predicate, String endpoint, PrioritizedFilterStatistics statistics) {
return prioritizedFilter;
}
};
this.filterService = new DynamicRouterFilterService(() -> prioritizedFilterFactory);
}
@Test
void testDefaultConstruct() {
assertDoesNotThrow(() -> new DynamicRouterFilterService());
}
@Test
void testConstruct() {
assertNotNull(filterService);
}
@Test
void testInitializeChannelFilters() {
filterService.initializeChannelFilters(DYNAMIC_ROUTER_CHANNEL);
Collection<PrioritizedFilter> filters = filterService.getFiltersForChannel(DYNAMIC_ROUTER_CHANNEL);
assertNotNull(filters);
assertTrue(filters.isEmpty());
}
@Test
void testCreateFilter() {
PrioritizedFilter filter = filterService.createFilter(
DYNAMIC_ROUTER_CHANNEL, 1, PredicateBuilder.constant(true), "endpoint",
prioritizedFilterStatistics);
assertEquals(prioritizedFilter, filter);
}
@Test
void testAddFilter() {
Mockito.when(prioritizedFilter.id()).thenReturn("id");
Mockito.when(prioritizedFilter.priority()).thenReturn(1);
filterService.addFilterForChannel(
"id", 1, PredicateBuilder.constant(true), "endpoint", DYNAMIC_ROUTER_CHANNEL, false);
Collection<PrioritizedFilter> filters = filterService.getFiltersForChannel(DYNAMIC_ROUTER_CHANNEL);
assertEquals(1, filters.size());
}
@Test
void testAddFilterInstance() {
Mockito.when(prioritizedFilter.id()).thenReturn("id");
Mockito.when(prioritizedFilter.priority()).thenReturn(1);
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
Collection<PrioritizedFilter> filters = filterService.getFiltersForChannel(DYNAMIC_ROUTER_CHANNEL);
assertEquals(1, filters.size());
}
@Test
void testAddFilterInstanceAlreadyExists() {
Mockito.when(prioritizedFilter.id()).thenReturn("id");
Mockito.when(prioritizedFilter.priority()).thenReturn(1);
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
Collection<PrioritizedFilter> filters = filterService.getFiltersForChannel(DYNAMIC_ROUTER_CHANNEL);
assertEquals(1, filters.size());
String result = filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
assertEquals("Error: Filter could not be added -- existing filter found with matching ID: true", result);
}
@Test
void testUpdateFilter() {
Mockito.when(prioritizedFilter.id()).thenReturn("id");
Mockito.when(prioritizedFilter.priority()).thenReturn(1);
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
Collection<PrioritizedFilter> filters = filterService.getFiltersForChannel(DYNAMIC_ROUTER_CHANNEL);
assertEquals(1, filters.size());
PrioritizedFilter filter = filters.stream()
.findFirst()
.orElseThrow(() -> new AssertionError("Could not get added filter"));
// Verify filter priority is (originally) 1
assertEquals(1, filter.priority());
// Update filter (change priority from 1 to 10)
Mockito.when(prioritizedFilter.priority()).thenReturn(10);
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, true);
filters = filterService.getFiltersForChannel(DYNAMIC_ROUTER_CHANNEL);
assertEquals(1, filters.size());
filter = filters.stream()
.findFirst()
.orElseThrow(() -> new AssertionError("Could not get added filter"));
// Verify filter priority is now 10
assertEquals(10, filter.priority());
}
@Test
void testUpdateFilterDoesNotExist() {
String result = filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, true);
assertEquals("Error: Filter could not be updated -- existing filter found with matching ID: false", result);
}
@Test
void testGetFilter() {
Mockito.when(prioritizedFilter.id()).thenReturn("id");
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
PrioritizedFilter filter = filterService.getFilterById("id", DYNAMIC_ROUTER_CHANNEL);
assertEquals(prioritizedFilter, filter);
}
@Test
void testGetFilterWithoutChannel() {
Mockito.when(prioritizedFilter.id()).thenReturn("id");
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
PrioritizedFilter filter = filterService.getFilterById("id", null);
assertEquals(prioritizedFilter, filter);
}
@Test
void testRemoveFilter() {
Mockito.when(prioritizedFilter.id()).thenReturn("id");
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
assertEquals(1, filterService.getFiltersForChannel(DYNAMIC_ROUTER_CHANNEL).size());
filterService.removeFilterById("id", DYNAMIC_ROUTER_CHANNEL);
assertTrue(filterService.getFiltersForChannel(DYNAMIC_ROUTER_CHANNEL).isEmpty());
}
@Test
void testRemoveNonexistentFilter() {
Mockito.when(prioritizedFilter.id()).thenReturn("id");
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
assertEquals(1, filterService.getFiltersForChannel(DYNAMIC_ROUTER_CHANNEL).size());
filterService.removeFilterById("camel", DYNAMIC_ROUTER_CHANNEL);
assertEquals(1, filterService.getFiltersForChannel(DYNAMIC_ROUTER_CHANNEL).size());
}
@Test
void testRemoveFilterWithoutChannel() {
Mockito.when(prioritizedFilter.id()).thenReturn("id");
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
assertEquals(1, filterService.getFiltersForChannel(DYNAMIC_ROUTER_CHANNEL).size());
filterService.removeFilterById("id", null);
assertTrue(filterService.getFiltersForChannel(DYNAMIC_ROUTER_CHANNEL).isEmpty());
}
@Test
void testGetChannelFilters() {
String channel = "test";
Mockito.when(prioritizedFilter.id()).thenReturn("id");
Mockito.when(prioritizedFilter.priority()).thenReturn(1);
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
Collection<PrioritizedFilter> result = filterService.getFiltersForChannel(channel);
assertEquals(1, result.size());
}
@Test
void testGetFilters() {
Mockito.when(prioritizedFilter.id()).thenReturn("id");
Mockito.when(prioritizedFilter.priority()).thenReturn(1);
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
Map<String, ConcurrentSkipListSet<PrioritizedFilter>> result = filterService.getFilterMap();
assertEquals(1, result.size());
}
@Test
void testGetChannelStatistics() {
String channel = "test";
Mockito.when(prioritizedFilter.id()).thenReturn("id");
Mockito.when(prioritizedFilter.priority()).thenReturn(1);
Mockito.when(prioritizedFilter.statistics()).thenReturn(prioritizedFilterStatistics);
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
List<PrioritizedFilterStatistics> result = filterService.getStatisticsForChannel(channel);
assertEquals(1, result.size());
}
@Test
void testGetStatistics() {
Mockito.when(prioritizedFilter.id()).thenReturn("id");
Mockito.when(prioritizedFilter.priority()).thenReturn(1);
Mockito.when(prioritizedFilter.statistics()).thenReturn(prioritizedFilterStatistics);
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
Map<String, List<PrioritizedFilterStatistics>> result = filterService.getFilterStatisticsMap();
assertEquals(1, result.size());
}
@Test
void testGetMatchingEndpointsForExchangeByChannel() {
String channel = "test";
Mockito.when(prioritizedFilter.id()).thenReturn("id");
Mockito.when(prioritizedFilter.priority()).thenReturn(1);
Mockito.when(prioritizedFilter.predicate()).thenReturn(predicate);
Mockito.when(prioritizedFilter.statistics()).thenReturn(prioritizedFilterStatistics);
Mockito.when(prioritizedFilter.endpoint()).thenReturn("testEndpoint");
Mockito.doNothing().when(prioritizedFilterStatistics).incrementCount();
Mockito.when(predicate.matches(exchange)).thenReturn(true);
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
String result = filterService.getMatchingEndpointsForExchangeByChannel(exchange, channel, true, false);
Assertions.assertEquals("testEndpoint", result);
}
@Test
void testGetMatchingEndpointsForExchangeByChannelWithNoMatchingRecipients() {
String channel = "test";
Mockito.when(exchange.getMessage()).thenReturn(message);
Mockito.when(prioritizedFilter.id()).thenReturn("id");
Mockito.when(prioritizedFilter.priority()).thenReturn(1);
Mockito.when(prioritizedFilter.predicate()).thenReturn(predicate);
Mockito.when(prioritizedFilter.statistics()).thenReturn(prioritizedFilterStatistics);
Mockito.when(predicate.matches(exchange)).thenReturn(false);
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
String result = filterService.getMatchingEndpointsForExchangeByChannel(exchange, channel, false, false);
assertEquals("log:org.apache.camel.component.dynamicrouter.filter.DynamicRouterFilterService.test" +
"?level=DEBUG" +
"&showAll=true" +
"&multiline=true",
result);
}
@Test
void testGetMatchingEndpointsForExchangeByChannelWithNoMatchingRecipientsWithWarnDroppedMessage() {
String channel = "test";
Mockito.when(exchange.getMessage()).thenReturn(message);
Mockito.when(prioritizedFilter.id()).thenReturn("id");
Mockito.when(prioritizedFilter.priority()).thenReturn(1);
Mockito.when(prioritizedFilter.predicate()).thenReturn(predicate);
Mockito.when(prioritizedFilter.statistics()).thenReturn(prioritizedFilterStatistics);
Mockito.when(predicate.matches(exchange)).thenReturn(false);
filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, false);
String result = filterService.getMatchingEndpointsForExchangeByChannel(exchange, channel, false, true);
assertEquals("log:org.apache.camel.component.dynamicrouter.filter.DynamicRouterFilterService.test" +
"?level=WARN" +
"&showAll=true" +
"&multiline=true",
result);
}
}
|
DynamicRouterFilterServiceTest
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/factories/CustomListImpl.java
|
{
"start": 260,
"end": 537
}
|
class ____<T> extends LinkedList<T> implements CustomList<T> {
private final String typeProp;
public CustomListImpl(String typeProp) {
this.typeProp = typeProp;
}
@Override
public String getTypeProp() {
return typeProp;
}
}
|
CustomListImpl
|
java
|
apache__kafka
|
group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/TopicIdsTest.java
|
{
"start": 1455,
"end": 7250
}
|
class ____ {
@Test
public void testTopicNamesCannotBeNull() {
assertThrows(NullPointerException.class, () -> new TopicIds(null, CoordinatorMetadataImage.EMPTY));
}
@Test
public void testTopicsImageCannotBeNull() {
assertThrows(NullPointerException.class, () -> new TopicIds(Set.of(), (CoordinatorMetadataImage) null));
}
@Test
public void testTopicResolverCannotBeNull() {
assertThrows(NullPointerException.class, () -> new TopicIds(Set.of(), (TopicIds.TopicResolver) null));
}
@Test
public void testSize() {
Set<String> topicNames = Set.of("foo", "bar", "baz");
Set<Uuid> topicIds = new TopicIds(topicNames, CoordinatorMetadataImage.EMPTY);
assertEquals(topicNames.size(), topicIds.size());
}
@Test
public void testIsEmpty() {
Set<String> topicNames = Set.of();
Set<Uuid> topicIds = new TopicIds(topicNames, CoordinatorMetadataImage.EMPTY);
assertEquals(topicNames.size(), topicIds.size());
}
@Test
public void testContains() {
Uuid fooUuid = Uuid.randomUuid();
Uuid barUuid = Uuid.randomUuid();
Uuid bazUuid = Uuid.randomUuid();
Uuid quxUuid = Uuid.randomUuid();
CoordinatorMetadataImage metadataImage = new KRaftCoordinatorMetadataImage(new MetadataImageBuilder()
.addTopic(fooUuid, "foo", 3)
.addTopic(barUuid, "bar", 3)
.addTopic(bazUuid, "qux", 3)
.build());
Set<Uuid> topicIds = new TopicIds(Set.of("foo", "bar", "baz"), metadataImage);
assertTrue(topicIds.contains(fooUuid));
assertTrue(topicIds.contains(barUuid));
assertFalse(topicIds.contains(bazUuid));
assertFalse(topicIds.contains(quxUuid));
}
@Test
public void testContainsAll() {
Uuid fooUuid = Uuid.randomUuid();
Uuid barUuid = Uuid.randomUuid();
Uuid bazUuid = Uuid.randomUuid();
Uuid quxUuid = Uuid.randomUuid();
CoordinatorMetadataImage metadataImage = new KRaftCoordinatorMetadataImage(new MetadataImageBuilder()
.addTopic(fooUuid, "foo", 3)
.addTopic(barUuid, "bar", 3)
.addTopic(bazUuid, "baz", 3)
.addTopic(quxUuid, "qux", 3)
.build());
Set<Uuid> topicIds = new TopicIds(Set.of("foo", "bar", "baz", "qux"), metadataImage);
assertTrue(topicIds.contains(fooUuid));
assertTrue(topicIds.contains(barUuid));
assertTrue(topicIds.contains(bazUuid));
assertTrue(topicIds.contains(quxUuid));
assertTrue(topicIds.containsAll(Set.of(fooUuid, barUuid, bazUuid, quxUuid)));
}
@Test
public void testContainsAllOneTopicConversionFails() {
// topic 'qux' only exists as topic name.
Uuid fooUuid = Uuid.randomUuid();
Uuid barUuid = Uuid.randomUuid();
Uuid bazUuid = Uuid.randomUuid();
Uuid quxUuid = Uuid.randomUuid();
CoordinatorMetadataImage metadataImage = new KRaftCoordinatorMetadataImage(new MetadataImageBuilder()
.addTopic(fooUuid, "foo", 3)
.addTopic(barUuid, "bar", 3)
.addTopic(bazUuid, "baz", 3)
.build());
Set<Uuid> topicIds = new TopicIds(Set.of("foo", "bar", "baz", "qux"), metadataImage);
assertTrue(topicIds.contains(fooUuid));
assertTrue(topicIds.contains(barUuid));
assertTrue(topicIds.contains(bazUuid));
assertTrue(topicIds.containsAll(Set.of(fooUuid, barUuid, bazUuid)));
assertFalse(topicIds.containsAll(Set.of(fooUuid, barUuid, bazUuid, quxUuid)));
}
@Test
public void testIterator() {
Uuid fooUuid = Uuid.randomUuid();
Uuid barUuid = Uuid.randomUuid();
Uuid bazUuid = Uuid.randomUuid();
Uuid quxUuid = Uuid.randomUuid();
CoordinatorMetadataImage metadataImage = new KRaftCoordinatorMetadataImage(new MetadataImageBuilder()
.addTopic(fooUuid, "foo", 3)
.addTopic(barUuid, "bar", 3)
.addTopic(bazUuid, "baz", 3)
.addTopic(quxUuid, "qux", 3)
.build());
Set<Uuid> topicIds = new TopicIds(Set.of("foo", "bar", "baz", "qux"), metadataImage);
Set<Uuid> expectedIds = Set.of(fooUuid, barUuid, bazUuid, quxUuid);
Set<Uuid> actualIds = new HashSet<>(topicIds);
assertEquals(expectedIds, actualIds);
}
@Test
public void testIteratorOneTopicConversionFails() {
// topic 'qux' only exists as topic id.
// topic 'quux' only exists as topic name.
Uuid fooUuid = Uuid.randomUuid();
Uuid barUuid = Uuid.randomUuid();
Uuid bazUuid = Uuid.randomUuid();
Uuid qux = Uuid.randomUuid();
CoordinatorMetadataImage metadataImage = new KRaftCoordinatorMetadataImage(new MetadataImageBuilder()
.addTopic(fooUuid, "foo", 3)
.addTopic(barUuid, "bar", 3)
.addTopic(bazUuid, "baz", 3)
.addTopic(qux, "qux", 3)
.build());
Set<Uuid> topicIds = new TopicIds(Set.of("foo", "bar", "baz", "quux"), metadataImage);
Set<Uuid> expectedIds = Set.of(fooUuid, barUuid, bazUuid);
Set<Uuid> actualIds = new HashSet<>(topicIds);
assertEquals(expectedIds, actualIds);
}
@Test
public void testEquals() {
Uuid topicId = Uuid.randomUuid();
KRaftCoordinatorMetadataImage metadataImage = new KRaftCoordinatorMetadataImage(new MetadataImageBuilder()
.addTopic(topicId, "topicId", 3)
.build());
TopicIds topicIds1 = new TopicIds(Set.of("topic"), metadataImage);
TopicIds topicIds2 = new TopicIds(Set.of("topic"), metadataImage);
assertEquals(topicIds1, topicIds2);
}
}
|
TopicIdsTest
|
java
|
netty__netty
|
transport-classes-io_uring/src/main/java/io/netty/channel/uring/IoUringBufferRingConfig.java
|
{
"start": 968,
"end": 7122
}
|
class ____ {
private final short bgId;
private final short bufferRingSize;
private final int batchSize;
private final int maxUnreleasedBuffers;
private final boolean incremental;
private final IoUringBufferRingAllocator allocator;
private final boolean batchAllocation;
/**
* Create a new configuration.
*
* @param bgId the buffer group id to use (must be non-negative).
* @param bufferRingSize the size of the ring
* @param maxUnreleasedBuffers this parameter is ignored by the buffer ring.
* @param allocator the {@link IoUringBufferRingAllocator} to use to allocate
* {@link io.netty.buffer.ByteBuf}s.
* @deprecated use {@link Builder}.
*/
@Deprecated
public IoUringBufferRingConfig(short bgId, short bufferRingSize, int maxUnreleasedBuffers,
IoUringBufferRingAllocator allocator) {
this(bgId, bufferRingSize, bufferRingSize / 2, maxUnreleasedBuffers,
IoUring.isRegisterBufferRingIncSupported(), allocator);
}
/**
* Create a new configuration.
*
* @param bgId the buffer group id to use (must be non-negative).
* @param bufferRingSize the size of the ring
* @param batchSize the size of the batch on how many buffers are added everytime we need to expand the
* buffer ring.
* @param maxUnreleasedBuffers this parameter is ignored by the buffer ring.
* @param incremental {@code true} if the buffer ring is using incremental buffer consumption.
* @param allocator the {@link IoUringBufferRingAllocator} to use to allocate
* {@link io.netty.buffer.ByteBuf}s.
* @deprecated use {@link Builder}.
*/
@Deprecated
public IoUringBufferRingConfig(short bgId, short bufferRingSize, int batchSize, int maxUnreleasedBuffers,
boolean incremental, IoUringBufferRingAllocator allocator) {
this(bgId, bufferRingSize, batchSize, maxUnreleasedBuffers, incremental, allocator, false);
}
private IoUringBufferRingConfig(short bgId, short bufferRingSize, int batchSize, int maxUnreleasedBuffers,
boolean incremental, IoUringBufferRingAllocator allocator, boolean batchAllocation) {
this.bgId = (short) ObjectUtil.checkPositiveOrZero(bgId, "bgId");
this.bufferRingSize = checkBufferRingSize(bufferRingSize);
this.batchSize = MathUtil.findNextPositivePowerOfTwo(
ObjectUtil.checkInRange(batchSize, 1, bufferRingSize, "batchSize"));
this.maxUnreleasedBuffers = ObjectUtil.checkInRange(
maxUnreleasedBuffers, bufferRingSize, Integer.MAX_VALUE, "maxUnreleasedBuffers");
if (incremental && !IoUring.isRegisterBufferRingIncSupported()) {
throw new IllegalArgumentException("Incremental buffer ring is not supported");
}
this.incremental = incremental;
this.allocator = ObjectUtil.checkNotNull(allocator, "allocator");
this.batchAllocation = batchAllocation;
}
/**
* Returns the buffer group id to use.
*
* @return the buffer group id to use.
*/
public short bufferGroupId() {
return bgId;
}
/**
* Returns the size of the ring.
*
* @return the size of the ring.
*/
public short bufferRingSize() {
return bufferRingSize;
}
/**
* Returns the size of the batch on how many buffers are added everytime we need to expand the buffer ring.
*
* @return batch size.
*/
public int batchSize() {
return batchSize;
}
/**
* Returns the maximum buffers that can be allocated out of this buffer ring and are
* unreleased yet
*
* @return the max unreleased buffers.
* @deprecated will be removed as it as no effect.
*/
@Deprecated
public int maxUnreleasedBuffers() {
return maxUnreleasedBuffers;
}
/**
* Returns the {@link IoUringBufferRingAllocator} to use to allocate {@link io.netty.buffer.ByteBuf}s.
*
* @return the allocator.
*/
public IoUringBufferRingAllocator allocator() {
return allocator;
}
/**
* Returns {@code true} if the ring should always be filled via a batch allocation or
* {@code false} if we will try to allocate a new {@link ByteBuf} as we used a buffer from the ring.
* @return {@code true} if the ring should always be filled via a batch allocation.
*/
public boolean isBatchAllocation() {
return batchAllocation;
}
/**
* Returns true if <a href="https://github.com/axboe/liburing/wiki/
* What's-new-with-io_uring-in-6.11-and-6.12#incremental-provided-buffer-consumption">incremental mode</a>
* should be used for the buffer ring.
*
* @return {@code true} if incremental mode is used, {@code false} otherwise.
*/
public boolean isIncremental() {
return incremental;
}
private static short checkBufferRingSize(short bufferRingSize) {
if (bufferRingSize < 1) {
throw new IllegalArgumentException("bufferRingSize: " + bufferRingSize + " (expected: > 0)");
}
boolean isPowerOfTwo = (bufferRingSize & (bufferRingSize - 1)) == 0;
if (!isPowerOfTwo) {
throw new IllegalArgumentException("bufferRingSize: " + bufferRingSize + " (expected: power of 2)");
}
return bufferRingSize;
}
@Override
public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) {
return false;
}
IoUringBufferRingConfig that = (IoUringBufferRingConfig) o;
return bgId == that.bgId;
}
@Override
public int hashCode() {
return Objects.hashCode(bgId);
}
public static Builder builder() {
return new Builder();
}
public static final
|
IoUringBufferRingConfig
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/state/StateBackend.java
|
{
"start": 9480,
"end": 10058
}
|
interface ____ {
/**
* @return The runtime environment of the executing task.
*/
Environment getEnv();
String getOperatorIdentifier();
@Nonnull
Collection<OperatorStateHandle> getStateHandles();
/**
* @return The registry to which created closeable objects will be * registered during
* restore.
*/
CloseableRegistry getCancelStreamRegistry();
CustomInitializationMetrics getCustomInitializationMetrics();
}
@Experimental
|
OperatorStateBackendParameters
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/filter/wall/BitwiseXorTest.java
|
{
"start": 168,
"end": 639
}
|
class ____ extends TestCase {
public void test_true() throws Exception {
assertTrue(WallUtils.isValidateMySql(//
"SELECT * from t where (id = 1) ^ (1=1)"));
}
public void test_false() throws Exception {
WallConfig config = new WallConfig();
config.setConditionOpBitwiseAllow(false);
assertFalse(WallUtils.isValidateMySql(//
"SELECT * from t where (id = 1) ^ (1=1)", config));
}
}
|
BitwiseXorTest
|
java
|
quarkusio__quarkus
|
extensions/panache/hibernate-reactive-rest-data-panache/runtime/src/main/java/io/quarkus/hibernate/reactive/rest/data/panache/PanacheRepositoryResource.java
|
{
"start": 955,
"end": 1115
}
|
interface ____<Repository extends PanacheRepositoryBase<Entity, ID>, Entity, ID>
extends ReactiveRestDataResource<Entity, ID> {
}
|
PanacheRepositoryResource
|
java
|
quarkusio__quarkus
|
extensions/micrometer/deployment/src/test/java/io/quarkus/micrometer/deployment/binder/VertxHttpClientMetricsTest.java
|
{
"start": 1397,
"end": 5686
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withConfigurationResource("test-logging.properties")
.overrideConfigKey("quarkus.redis.devservices.enabled", "false")
.withApplicationRoot((jar) -> jar
.addClasses(App.class, HttpClient.class, WsClient.class, Util.class,
ClientDummyTag.class, ClientHeaderTag.class));
final static SimpleMeterRegistry registry = new SimpleMeterRegistry();
@BeforeAll
static void setRegistry() {
Metrics.addRegistry(registry);
}
@AfterAll()
static void removeRegistry() {
Metrics.removeRegistry(registry);
}
@Inject
HttpClient client;
@Inject
App server;
@Inject
WsClient ws;
private Search getMeter(String name) {
return registry.find(name);
}
@Test
void testWebClientMetrics() {
server.start();
client.init();
// If the WS test runs before, some data was already written
double sizeBefore = 0;
if (getMeter("http.client.bytes.written").summary() != null) {
sizeBefore = registry.find("http.client.bytes.written")
.tag("clientName", "my-client")
.summary().totalAmount();
}
try {
Assertions.assertEquals("ok", client.get(null));
Assertions.assertEquals("ok", client.get("bar"));
Assertions.assertEquals("HELLO", client.post("hello"));
Assertions.assertNotNull(getMeter("http.client.connections").longTaskTimer());
Assertions.assertNotNull(getMeter("http.client.active.connections").gauge());
// Body sizes
double expectedBytesWritten = sizeBefore + 5;
await().untilAsserted(
() -> Assertions.assertEquals(expectedBytesWritten,
registry.find("http.client.bytes.written")
.tag("clientName", "my-client").summary().totalAmount()));
await().untilAsserted(() -> Assertions.assertEquals(9,
registry.find("http.client.bytes.read")
.tag("clientName", "my-client").summary().totalAmount()));
await().until(() -> getMeter("http.client.requests").timer().totalTime(TimeUnit.NANOSECONDS) > 0);
await().until(() -> {
// Because of the different tag, the timer got called a single time
return getMeter("http.client.requests").timer().count() == 1;
});
await().until(() -> getMeter("http.client.active.connections").gauge().value() == 1);
Assertions.assertEquals(1, registry.find("http.client.requests")
.tag("uri", "root")
.tag("dummy", "value")
.tag("foo", "UNSET")
.tag("outcome", "SUCCESS").timers().size(),
Util.foundClientRequests(registry, "/ with tag outcome=SUCCESS."));
Assertions.assertEquals(1, registry.find("http.client.requests")
.tag("uri", "root")
.tag("dummy", "value")
.tag("foo", "bar")
.tag("outcome", "SUCCESS").timers().size(),
Util.foundClientRequests(registry, "/ with tag outcome=SUCCESS."));
// Queue
Assertions.assertEquals(3, registry.find("http.client.queue.delay")
.tag("clientName", "my-client").timer().count());
Assertions.assertTrue(registry.find("http.client.queue.delay")
.tag("clientName", "my-client").timer().totalTime(TimeUnit.NANOSECONDS) > 0);
await().until(() -> getMeter("http.client.queue.size").gauge().value() == 0.0);
} finally {
server.stop();
}
}
@Test
void testWebSocket() {
server.start();
try {
ws.send("hello");
ws.send("how are you?");
Assertions.assertNotNull(getMeter("http.client.websocket.connections").gauge());
} finally {
server.stop();
}
}
@ApplicationScoped
static
|
VertxHttpClientMetricsTest
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/test/java/org/springframework/web/reactive/result/view/script/JythonScriptTemplateTests.java
|
{
"start": 1408,
"end": 2733
}
|
class ____ {
@Test
void renderTemplate() throws Exception {
Map<String, Object> model = new HashMap<>();
model.put("title", "Layout example");
model.put("body", "This is the body");
String url = "org/springframework/web/reactive/result/view/script/jython/template.html";
MockServerHttpResponse response = renderViewWithModel(url, model);
assertThat(response.getBodyAsString().block()).isEqualTo("<html><head><title>Layout example</title></head><body><p>This is the body</p></body></html>");
}
private MockServerHttpResponse renderViewWithModel(String viewUrl, Map<String, Object> model) throws Exception {
ScriptTemplateView view = createViewWithUrl(viewUrl);
MockServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("/"));
view.renderInternal(model, MediaType.TEXT_HTML, exchange).block();
return exchange.getResponse();
}
private ScriptTemplateView createViewWithUrl(String viewUrl) throws Exception {
AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
ctx.register(ScriptTemplatingConfiguration.class);
ctx.refresh();
ScriptTemplateView view = new ScriptTemplateView();
view.setApplicationContext(ctx);
view.setUrl(viewUrl);
view.afterPropertiesSet();
return view;
}
@Configuration
static
|
JythonScriptTemplateTests
|
java
|
apache__camel
|
components/camel-http/src/test/java/org/apache/camel/component/http/HttpEndpointURLTest.java
|
{
"start": 1313,
"end": 4490
}
|
class ____ extends CamelTestSupport {
@Test
public void testHttpEndpointURLWithIPv6() {
HttpEndpoint endpoint = (HttpEndpoint) context.getEndpoint("http://[2a00:8a00:6000:40::1413]:30300/test?test=true");
assertEquals("http://[2a00:8a00:6000:40::1413]:30300/test?test=true", endpoint.getHttpUri().toString());
}
@Test
public void testHttpEndpointHttpUri() {
HttpEndpoint http1 = context.getEndpoint("http://www.google.com", HttpEndpoint.class);
HttpEndpoint http2 = context.getEndpoint(
"https://www.google.com?test=parameter&proxyHost=myotherproxy&proxyPort=2345", HttpEndpoint.class);
HttpEndpoint http3 = context.getEndpoint("https://www.google.com?test=parameter", HttpEndpoint.class);
assertEquals("http://www.google.com", http1.getHttpUri().toString(), "Get a wrong HttpUri of http1");
assertEquals("https://www.google.com?test=parameter", http2.getHttpUri().toString(), "Get a wrong HttpUri of http2");
assertEquals(http2.getHttpUri(), http3.getHttpUri(), "Get a wrong HttpUri of http2 andhttp3");
// secure because protocol in remainder is https
HttpEndpoint http4 = context.getEndpoint("http://https://www.google.com", HttpEndpoint.class);
assertEquals("https://www.google.com", http4.getHttpUri().toString(), "Get a wrong HttpUri of http1");
// secure because protocol in remainder is https
HttpEndpoint http5 = context.getEndpoint("https://https://www.google.com", HttpEndpoint.class);
assertEquals("https://www.google.com", http5.getHttpUri().toString(), "Get a wrong HttpUri of http1");
// not secure because protocol in remainder is plain http
HttpEndpoint http6 = context.getEndpoint("https://http://www.google.com", HttpEndpoint.class);
assertEquals("http://www.google.com", http6.getHttpUri().toString(), "Get a wrong HttpUri of http1");
}
@Test
public void testConnectionManagerFromHttpUri() {
HttpEndpoint http1
= context.getEndpoint("http://www.google.com?maxTotalConnections=40&connectionsPerRoute=5", HttpEndpoint.class);
HttpClientConnectionManager connectionManager = http1.getClientConnectionManager();
assertInstanceOf(PoolingHttpClientConnectionManager.class, connectionManager, "Get a wrong type of connection manager");
PoolingHttpClientConnectionManager poolManager = (PoolingHttpClientConnectionManager) connectionManager;
assertEquals(40, poolManager.getMaxTotal(), "Get a wrong setting of maxTotalConnections");
assertEquals(5, poolManager.getDefaultMaxPerRoute(), "Get a wrong setting of connectionsPerRoute");
}
@Test
// Just for CAMEL-8607
public void testRawWithUnsafeCharacters() {
HttpEndpoint http1 = context.getEndpoint(
"http://www.google.com?authenticationPreemptive=true&authPassword=RAW(foo%bar)&authUsername=RAW(username)",
HttpEndpoint.class);
assertTrue(URISupport.sanitizeUri(http1.getEndpointUri()).indexOf("authPassword=xxxxxx") > 0,
"The password is not loggged");
}
}
|
HttpEndpointURLTest
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/aggregate/window/buffers/RecordsWindowBuffer.java
|
{
"start": 7462,
"end": 8877
}
|
class ____ implements WindowBuffer.LocalFactory {
private static final long serialVersionUID = 1L;
private final PagedTypeSerializer<RowData> keySer;
private final AbstractRowDataSerializer<RowData> inputSer;
private final RecordsCombiner.LocalFactory localFactory;
public LocalFactory(
PagedTypeSerializer<RowData> keySer,
AbstractRowDataSerializer<RowData> inputSer,
RecordsCombiner.LocalFactory localFactory) {
this.keySer = keySer;
this.inputSer = inputSer;
this.localFactory = localFactory;
}
@Override
public WindowBuffer create(
Object operatorOwner,
MemoryManager memoryManager,
long memorySize,
RuntimeContext runtimeContext,
Collector<RowData> collector,
ZoneId shiftTimeZone)
throws Exception {
RecordsCombiner combiner =
localFactory.createRecordsCombiner(runtimeContext, collector);
return new RecordsWindowBuffer(
operatorOwner,
memoryManager,
memorySize,
combiner,
keySer,
inputSer,
false,
shiftTimeZone);
}
}
}
|
LocalFactory
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/ClaimCheckEipPushPopExcludeBodyTest.java
|
{
"start": 1024,
"end": 2578
}
|
class ____ extends ContextTestSupport {
@Test
public void testPushPopBodyExclude() throws Exception {
getMockEndpoint("mock:a").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:a").expectedHeaderReceived("foo", 123);
getMockEndpoint("mock:a").expectedHeaderReceived("bar", "Moes");
getMockEndpoint("mock:b").expectedBodiesReceived("Bye World");
getMockEndpoint("mock:b").expectedHeaderReceived("foo", 456);
getMockEndpoint("mock:b").expectedHeaderReceived("bar", "Jacks");
getMockEndpoint("mock:c").expectedBodiesReceived("Bye World");
getMockEndpoint("mock:c").expectedHeaderReceived("foo", 123);
getMockEndpoint("mock:c").expectedHeaderReceived("bar", "Jacks");
template.sendBodyAndHeader("direct:start", "Hello World", "foo", 123);
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").setHeader("bar", constant("Moes")).to("mock:a").claimCheck(ClaimCheckOperation.Push)
.transform().constant("Bye World")
.setHeader("foo", constant(456)).setHeader("bar", constant("Jacks")).to("mock:b")
// skip the body and bar header
.claimCheck(ClaimCheckOperation.Pop, null, "-body,-header:bar").to("mock:c");
}
};
}
}
|
ClaimCheckEipPushPopExcludeBodyTest
|
java
|
quarkusio__quarkus
|
integration-tests/grpc-mutual-auth/src/test/java/io/quarkus/grpc/examples/hello/HelloWorldMutualTlsEndpointIT.java
|
{
"start": 119,
"end": 200
}
|
class ____ extends HelloWorldMutualTlsEndpointTest {
}
|
HelloWorldMutualTlsEndpointIT
|
java
|
quarkusio__quarkus
|
extensions/jsonb/runtime/src/test/java/io/quarkus/jsonb/JsonbUnitTest.java
|
{
"start": 970,
"end": 1328
}
|
class ____ implements JsonbAdapter<Instant, Long> {
@Override
public Long adaptToJson(Instant obj) {
return obj.getEpochSecond();
}
@Override
public Instant adaptFromJson(Long obj) {
return Instant.ofEpochSecond(obj);
}
}
}
}
|
EpochSecondsAdapter
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java
|
{
"start": 3678,
"end": 56906
}
|
class ____ extends SecurityOnTrialLicenseRestTestCase {
private static final String SYSTEM_USER = "system_user";
private static final SecureString SYSTEM_USER_PASSWORD = new SecureString("system-user-password".toCharArray());
private static final String END_USER = "end_user";
private static final SecureString END_USER_PASSWORD = new SecureString("end-user-password".toCharArray());
private static final String MANAGE_OWN_API_KEY_USER = "manage_own_api_key_user";
private static final String REMOTE_PERMISSIONS_USER = "remote_permissions_user";
private static final String MANAGE_API_KEY_USER = "manage_api_key_user";
private static final String MANAGE_SECURITY_USER = "manage_security_user";
@Before
public void createUsers() throws IOException {
createUser(SYSTEM_USER, SYSTEM_USER_PASSWORD, List.of("system_role"));
createRole("system_role", Set.of("grant_api_key"));
createUser(END_USER, END_USER_PASSWORD, List.of("user_role"));
createRole("user_role", Set.of("monitor"));
createUser(MANAGE_OWN_API_KEY_USER, END_USER_PASSWORD, List.of("manage_own_api_key_role"));
createRole("manage_own_api_key_role", Set.of("manage_own_api_key"));
createUser(MANAGE_API_KEY_USER, END_USER_PASSWORD, List.of("manage_api_key_role"));
createRole("manage_api_key_role", Set.of("manage_api_key"));
createUser(MANAGE_SECURITY_USER, END_USER_PASSWORD, List.of("manage_security_role"));
createRoleWithDescription("manage_security_role", Set.of("manage_security"), "Allows all security-related operations!");
}
@After
public void cleanUp() throws IOException {
deleteUser(SYSTEM_USER);
deleteUser(END_USER);
deleteUser(MANAGE_OWN_API_KEY_USER);
deleteUser(MANAGE_API_KEY_USER);
deleteUser(MANAGE_SECURITY_USER);
deleteRole("system_role");
deleteRole("user_role");
deleteRole("manage_own_api_key_role");
deleteRole("manage_api_key_role");
deleteRole("manage_security_role");
invalidateApiKeysForUser(END_USER);
invalidateApiKeysForUser(MANAGE_OWN_API_KEY_USER);
invalidateApiKeysForUser(MANAGE_API_KEY_USER);
invalidateApiKeysForUser(MANAGE_SECURITY_USER);
}
@SuppressWarnings("unchecked")
public void testGetApiKeyRoleDescriptors() throws IOException {
// First key without assigned role descriptors, i.e. it inherits owner user's permission
// This can be achieved by either omitting the role_descriptors field in the request or
// explicitly set it to an empty object.
final Request createApiKeyRequest1 = new Request("POST", "_security/api_key");
if (randomBoolean()) {
createApiKeyRequest1.setJsonEntity("""
{
"name": "k1"
}""");
} else {
createApiKeyRequest1.setJsonEntity("""
{
"name": "k1",
"role_descriptors": { }
}""");
}
assertOK(adminClient().performRequest(createApiKeyRequest1));
// Second key with a single assigned role descriptor
final Request createApiKeyRequest2 = new Request("POST", "_security/api_key");
createApiKeyRequest2.setJsonEntity("""
{
"name": "k2",
"role_descriptors": {
"x": {
"cluster": [
"monitor"
]
}
}
}""");
assertOK(adminClient().performRequest(createApiKeyRequest2));
// Third key with two assigned role descriptors
final Request createApiKeyRequest3 = new Request("POST", "_security/api_key");
createApiKeyRequest3.setJsonEntity("""
{
"name": "k3",
"role_descriptors": {
"x": {
"cluster": [
"monitor"
]
},
"y": {
"indices": [
{
"names": [
"index"
],
"privileges": [
"read"
]
}
]
}
}
}""");
assertOK(adminClient().performRequest(createApiKeyRequest3));
// Role descriptors are returned by both get and query api key calls
final boolean withLimitedBy = randomBoolean();
final List<Map<String, Object>> apiKeyMaps;
if (randomBoolean()) {
final Request getApiKeyRequest = new Request("GET", "_security/api_key");
if (withLimitedBy) {
getApiKeyRequest.addParameter("with_limited_by", "true");
} else if (randomBoolean()) {
getApiKeyRequest.addParameter("with_limited_by", "false");
}
final Response getApiKeyResponse = adminClient().performRequest(getApiKeyRequest);
assertOK(getApiKeyResponse);
apiKeyMaps = (List<Map<String, Object>>) responseAsMap(getApiKeyResponse).get("api_keys");
} else {
final Request queryApiKeyRequest = new Request("POST", "_security/_query/api_key");
if (withLimitedBy) {
queryApiKeyRequest.addParameter("with_limited_by", "true");
} else if (randomBoolean()) {
queryApiKeyRequest.addParameter("with_limited_by", "false");
}
final Response queryApiKeyResponse = adminClient().performRequest(queryApiKeyRequest);
assertOK(queryApiKeyResponse);
apiKeyMaps = (List<Map<String, Object>>) responseAsMap(queryApiKeyResponse).get("api_keys");
}
assertThat(apiKeyMaps.size(), equalTo(3));
for (Map<String, Object> apiKeyMap : apiKeyMaps) {
final String name = (String) apiKeyMap.get("name");
assertThat(apiKeyMap, not(hasKey("access")));
@SuppressWarnings("unchecked")
final var roleDescriptors = (Map<String, Object>) apiKeyMap.get("role_descriptors");
if (withLimitedBy) {
final List<Map<String, Object>> limitedBy = (List<Map<String, Object>>) apiKeyMap.get("limited_by");
assertThat(limitedBy.size(), equalTo(1));
assertThat(
limitedBy.get(0),
equalTo(Map.of(ES_TEST_ROOT_ROLE, XContentTestUtils.convertToMap(ES_TEST_ROOT_ROLE_DESCRIPTOR)))
);
} else {
assertThat(apiKeyMap, not(hasKey("limited_by")));
}
switch (name) {
case "k1" -> {
assertThat(roleDescriptors, anEmptyMap());
}
case "k2" -> {
assertThat(
roleDescriptors,
equalTo(
Map.of("x", XContentTestUtils.convertToMap(new RoleDescriptor("x", new String[] { "monitor" }, null, null)))
)
);
}
case "k3" -> {
assertThat(
roleDescriptors,
equalTo(
Map.of(
"x",
XContentTestUtils.convertToMap(new RoleDescriptor("x", new String[] { "monitor" }, null, null)),
"y",
XContentTestUtils.convertToMap(
new RoleDescriptor(
"y",
null,
new RoleDescriptor.IndicesPrivileges[] {
RoleDescriptor.IndicesPrivileges.builder().indices("index").privileges("read").build() },
null
)
)
)
)
);
}
default -> throw new IllegalStateException("unknown api key name [" + name + "]");
}
}
}
@SuppressWarnings({ "unchecked" })
public void testAuthenticateResponseApiKey() throws IOException {
final String expectedApiKeyName = "my-api-key-name";
final Map<String, String> expectedApiKeyMetadata = Map.of("not", "returned");
final Map<String, Object> createApiKeyRequestBody = Map.of("name", expectedApiKeyName, "metadata", expectedApiKeyMetadata);
final Request createApiKeyRequest = new Request("POST", "_security/api_key");
createApiKeyRequest.setJsonEntity(XContentTestUtils.convertToXContent(createApiKeyRequestBody, XContentType.JSON).utf8ToString());
final Response createApiKeyResponse = adminClient().performRequest(createApiKeyRequest);
final Map<String, Object> createApiKeyResponseMap = responseAsMap(createApiKeyResponse); // keys: id, name, api_key, encoded
final String actualApiKeyId = (String) createApiKeyResponseMap.get("id");
final String actualApiKeyName = (String) createApiKeyResponseMap.get("name");
final String actualApiKeyEncoded = (String) createApiKeyResponseMap.get("encoded"); // Base64(id:api_key)
assertThat(actualApiKeyId, not(emptyString()));
assertThat(actualApiKeyName, equalTo(expectedApiKeyName));
assertThat(actualApiKeyEncoded, not(emptyString()));
doTestAuthenticationWithApiKey(expectedApiKeyName, actualApiKeyId, actualApiKeyEncoded);
}
public void testGrantApiKeyForOtherUserWithPassword() throws IOException {
Request request = new Request("POST", "_security/api_key/grant");
request.setOptions(
RequestOptions.DEFAULT.toBuilder()
.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SYSTEM_USER, SYSTEM_USER_PASSWORD))
);
final Map<String, Object> requestBody = Map.ofEntries(
Map.entry("grant_type", "password"),
Map.entry("username", END_USER),
Map.entry("password", END_USER_PASSWORD.toString()),
Map.entry("api_key", Map.of("name", "test_api_key_password"))
);
request.setJsonEntity(XContentTestUtils.convertToXContent(requestBody, XContentType.JSON).utf8ToString());
final Response response = client().performRequest(request);
final Map<String, Object> responseBody = entityAsMap(response);
assertThat(responseBody.get("name"), equalTo("test_api_key_password"));
assertThat(responseBody.get("id"), notNullValue());
assertThat(responseBody.get("id"), instanceOf(String.class));
ApiKey apiKey = getApiKey((String) responseBody.get("id"));
assertThat(apiKey.getUsername(), equalTo(END_USER));
assertThat(apiKey.getRealm(), equalTo("default_native"));
assertThat(apiKey.getRealmType(), equalTo("native"));
}
public void testGrantApiKeyForOtherUserWithAccessToken() throws IOException {
final Tuple<String, String> token = super.createOAuthToken(END_USER, END_USER_PASSWORD);
final String accessToken = token.v1();
final Request request = new Request("POST", "_security/api_key/grant");
request.setOptions(
RequestOptions.DEFAULT.toBuilder()
.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SYSTEM_USER, SYSTEM_USER_PASSWORD))
);
final Map<String, Object> requestBody = Map.ofEntries(
Map.entry("grant_type", "access_token"),
Map.entry("access_token", accessToken),
Map.entry("api_key", Map.of("name", "test_api_key_token", "expiration", "2h"))
);
request.setJsonEntity(XContentTestUtils.convertToXContent(requestBody, XContentType.JSON).utf8ToString());
final Instant before = Instant.now();
final Response response = client().performRequest(request);
final Instant after = Instant.now();
final Map<String, Object> responseBody = entityAsMap(response);
assertThat(responseBody.get("name"), equalTo("test_api_key_token"));
assertThat(responseBody.get("id"), notNullValue());
assertThat(responseBody.get("id"), instanceOf(String.class));
ApiKey apiKey = getApiKey((String) responseBody.get("id"));
assertThat(apiKey.getUsername(), equalTo(END_USER));
assertThat(apiKey.getRealm(), equalTo("default_native"));
assertThat(apiKey.getRealmType(), equalTo("native"));
Instant minExpiry = before.plus(2, ChronoUnit.HOURS);
Instant maxExpiry = after.plus(2, ChronoUnit.HOURS);
assertThat(apiKey.getExpiration(), notNullValue());
assertThat(apiKey.getExpiration(), greaterThanOrEqualTo(minExpiry));
assertThat(apiKey.getExpiration(), lessThanOrEqualTo(maxExpiry));
}
public void testGrantApiKeyWithoutApiKeyNameWillFail() throws IOException {
Request request = new Request("POST", "_security/api_key/grant");
request.setOptions(
RequestOptions.DEFAULT.toBuilder()
.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SYSTEM_USER, SYSTEM_USER_PASSWORD))
);
final Map<String, Object> requestBody = Map.ofEntries(
Map.entry("grant_type", "password"),
Map.entry("username", END_USER),
Map.entry("password", END_USER_PASSWORD.toString())
);
request.setJsonEntity(XContentTestUtils.convertToXContent(requestBody, XContentType.JSON).utf8ToString());
final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request));
assertEquals(400, e.getResponse().getStatusLine().getStatusCode());
assertThat(e.getMessage(), containsString("api key name is required"));
}
public void testGrantApiKeyWithOnlyManageOwnApiKeyPrivilegeFails() throws IOException {
final Request request = new Request("POST", "_security/api_key/grant");
request.setOptions(
RequestOptions.DEFAULT.toBuilder()
.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(MANAGE_OWN_API_KEY_USER, END_USER_PASSWORD))
);
final Map<String, Object> requestBody = Map.ofEntries(
Map.entry("grant_type", "password"),
Map.entry("username", MANAGE_OWN_API_KEY_USER),
Map.entry("password", END_USER_PASSWORD.toString()),
Map.entry("api_key", Map.of("name", "test_api_key_password"))
);
request.setJsonEntity(XContentTestUtils.convertToXContent(requestBody, XContentType.JSON).utf8ToString());
final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request));
assertEquals(403, e.getResponse().getStatusLine().getStatusCode());
assertThat(e.getMessage(), containsString("action [" + GrantApiKeyAction.NAME + "] is unauthorized for user"));
}
public void testApiKeyWithManageRoles() throws IOException {
RoleDescriptor role = roleWithManageRoles("manage-roles-role", new String[] { "manage_own_api_key" }, "allowed-prefix*");
getSecurityClient().putRole(role);
createUser("test-user", END_USER_PASSWORD, List.of("manage-roles-role"));
final Request createApiKeyrequest = new Request("POST", "_security/api_key");
createApiKeyrequest.setOptions(
RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", basicAuthHeaderValue("test-user", END_USER_PASSWORD))
);
final Map<String, Object> requestBody = Map.of(
"name",
"test-api-key",
"role_descriptors",
Map.of(
"test-role",
XContentTestUtils.convertToMap(roleWithManageRoles("test-role", new String[0], "allowed-prefix*")),
"another-test-role",
// This is not allowed by the limited-by-role (creator of the api key), so should not grant access to not-allowed=prefix*
XContentTestUtils.convertToMap(roleWithManageRoles("another-test-role", new String[0], "not-allowed-prefix*"))
)
);
createApiKeyrequest.setJsonEntity(XContentTestUtils.convertToXContent(requestBody, XContentType.JSON).utf8ToString());
Map<String, Object> responseMap = responseAsMap(client().performRequest(createApiKeyrequest));
String encodedApiKey = responseMap.get("encoded").toString();
final Request createRoleRequest = new Request("POST", "_security/role/test-role");
createRoleRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + encodedApiKey));
// Allowed role by manage roles permission
{
createRoleRequest.setJsonEntity("""
{"indices": [{"names": ["allowed-prefix-test"],"privileges": ["read"]}]}""");
assertOK(client().performRequest(createRoleRequest));
}
// Not allowed role by manage roles permission
{
createRoleRequest.setJsonEntity("""
{"indices": [{"names": ["not-allowed-prefix-test"],"privileges": ["read"]}]}""");
final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(createRoleRequest));
assertEquals(403, e.getResponse().getStatusLine().getStatusCode());
assertThat(e.getMessage(), containsString("this action is granted by the cluster privileges [manage_security,all]"));
}
}
public void testUpdateApiKey() throws IOException {
final var apiKeyName = "my-api-key-name";
final Map<String, Object> apiKeyMetadata = Map.of("not", "returned");
final Map<String, Object> createApiKeyRequestBody = Map.of("name", apiKeyName, "metadata", apiKeyMetadata);
final Request createApiKeyRequest = new Request("POST", "_security/api_key");
createApiKeyRequest.setJsonEntity(XContentTestUtils.convertToXContent(createApiKeyRequestBody, XContentType.JSON).utf8ToString());
createApiKeyRequest.setOptions(
RequestOptions.DEFAULT.toBuilder()
.addHeader("Authorization", headerFromRandomAuthMethod(MANAGE_OWN_API_KEY_USER, END_USER_PASSWORD))
);
final Response createApiKeyResponse = client().performRequest(createApiKeyRequest);
final Map<String, Object> createApiKeyResponseMap = responseAsMap(createApiKeyResponse); // keys: id, name, api_key, encoded
final var apiKeyId = (String) createApiKeyResponseMap.get("id");
final var apiKeyEncoded = (String) createApiKeyResponseMap.get("encoded"); // Base64(id:api_key)
assertThat(apiKeyId, not(emptyString()));
assertThat(apiKeyEncoded, not(emptyString()));
doTestUpdateApiKey(apiKeyName, apiKeyId, apiKeyEncoded, apiKeyMetadata);
}
@SuppressWarnings({ "unchecked" })
public void testBulkUpdateApiKey() throws IOException {
final EncodedApiKey apiKeyExpectingUpdate = createApiKey("my-api-key-name-1", Map.of("not", "returned"));
final EncodedApiKey apiKeyExpectingNoop = createApiKey("my-api-key-name-2", Map.of("not", "returned (changed)", "foo", "bar"));
final Map<String, Object> metadataForInvalidatedKey = Map.of("will not be updated", true);
final EncodedApiKey invalidatedApiKey = createApiKey("my-api-key-name-3", metadataForInvalidatedKey);
getSecurityClient().invalidateApiKeys(invalidatedApiKey.id);
final var notFoundApiKeyId = "not-found-api-key-id";
final List<String> idsToUpdate = shuffledList(
List.of(apiKeyExpectingUpdate.id, apiKeyExpectingNoop.id, notFoundApiKeyId, invalidatedApiKey.id)
);
final var bulkUpdateApiKeyRequest = new Request("POST", "_security/api_key/_bulk_update");
final Map<String, Object> expectedApiKeyMetadata = Map.of("not", "returned (changed)", "foo", "bar");
final Map<String, Object> updateApiKeyRequestBody = Map.of("ids", idsToUpdate, "metadata", expectedApiKeyMetadata);
bulkUpdateApiKeyRequest.setJsonEntity(
XContentTestUtils.convertToXContent(updateApiKeyRequestBody, XContentType.JSON).utf8ToString()
);
final Response bulkUpdateApiKeyResponse = performRequestUsingRandomAuthMethod(bulkUpdateApiKeyRequest);
assertOK(bulkUpdateApiKeyResponse);
final Map<String, Object> response = responseAsMap(bulkUpdateApiKeyResponse);
assertEquals(List.of(apiKeyExpectingUpdate.id()), response.get("updated"));
assertEquals(List.of(apiKeyExpectingNoop.id()), response.get("noops"));
final Map<String, Object> errors = (Map<String, Object>) response.get("errors");
assertEquals(2, errors.get("count"));
final Map<String, Map<String, Object>> errorDetails = (Map<String, Map<String, Object>>) errors.get("details");
assertEquals(2, errorDetails.size());
expectErrorFields(
"resource_not_found_exception",
"no API key owned by requesting user found for ID [" + notFoundApiKeyId + "]",
errorDetails.get(notFoundApiKeyId)
);
expectErrorFields(
"illegal_argument_exception",
"cannot update invalidated API key [" + invalidatedApiKey.id + "]",
errorDetails.get(invalidatedApiKey.id)
);
expectMetadata(apiKeyExpectingUpdate.id, expectedApiKeyMetadata);
expectMetadata(apiKeyExpectingNoop.id, expectedApiKeyMetadata);
expectMetadata(invalidatedApiKey.id, metadataForInvalidatedKey);
doTestAuthenticationWithApiKey(apiKeyExpectingUpdate.name, apiKeyExpectingUpdate.id, apiKeyExpectingUpdate.encoded);
doTestAuthenticationWithApiKey(apiKeyExpectingNoop.name, apiKeyExpectingNoop.id, apiKeyExpectingNoop.encoded);
}
public void testBulkUpdateExpirationTimeApiKey() throws IOException {
final EncodedApiKey apiKey1 = createApiKey("my-api-key-name", Map.of());
final EncodedApiKey apiKey2 = createApiKey("my-other-api-key-name", Map.of());
final var bulkUpdateApiKeyRequest = new Request("POST", "_security/api_key/_bulk_update");
final TimeValue expiration = ApiKeyTests.randomFutureExpirationTime();
bulkUpdateApiKeyRequest.setJsonEntity(
XContentTestUtils.convertToXContent(Map.of("ids", List.of(apiKey1.id, apiKey2.id), "expiration", expiration), XContentType.JSON)
.utf8ToString()
);
final Response bulkUpdateApiKeyResponse = performRequestUsingRandomAuthMethod(bulkUpdateApiKeyRequest);
assertOK(bulkUpdateApiKeyResponse);
final Map<String, Object> response = responseAsMap(bulkUpdateApiKeyResponse);
assertEquals(List.of(apiKey1.id(), apiKey2.id()), response.get("updated"));
assertNull(response.get("errors"));
assertEquals(List.of(), response.get("noops"));
}
public void testUpdateBadExpirationTimeApiKey() throws IOException {
final EncodedApiKey apiKey = createApiKey("my-api-key-name", Map.of());
final boolean bulkUpdate = randomBoolean();
TimeValue expiration = randomFrom(TimeValue.ZERO, TimeValue.MINUS_ONE);
final String method;
final Map<String, Object> requestBody;
final String requestPath;
if (bulkUpdate) {
method = "POST";
requestBody = Map.of("expiration", expiration, "ids", List.of(apiKey.id));
requestPath = "_security/api_key/_bulk_update";
} else {
method = "PUT";
requestBody = Map.of("expiration", expiration);
requestPath = "_security/api_key/" + apiKey.id;
}
final var bulkUpdateApiKeyRequest = new Request(method, requestPath);
bulkUpdateApiKeyRequest.setJsonEntity(XContentTestUtils.convertToXContent(requestBody, XContentType.JSON).utf8ToString());
final ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(bulkUpdateApiKeyRequest));
assertEquals(400, e.getResponse().getStatusLine().getStatusCode());
assertThat(e.getMessage(), containsString("API key expiration must be in the future"));
}
public void testGrantTargetCanUpdateApiKey() throws IOException {
final var request = new Request("POST", "_security/api_key/grant");
request.setOptions(
RequestOptions.DEFAULT.toBuilder()
.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SYSTEM_USER, SYSTEM_USER_PASSWORD))
);
final var apiKeyName = "test_api_key_password";
final Map<String, Object> requestBody = Map.ofEntries(
Map.entry("grant_type", "password"),
Map.entry("username", MANAGE_OWN_API_KEY_USER),
Map.entry("password", END_USER_PASSWORD.toString()),
Map.entry("api_key", Map.of("name", apiKeyName))
);
request.setJsonEntity(XContentTestUtils.convertToXContent(requestBody, XContentType.JSON).utf8ToString());
final Response response = client().performRequest(request);
final Map<String, Object> createApiKeyResponseMap = responseAsMap(response); // keys: id, name, api_key, encoded
final var apiKeyId = (String) createApiKeyResponseMap.get("id");
final var apiKeyEncoded = (String) createApiKeyResponseMap.get("encoded"); // Base64(id:api_key)
assertThat(apiKeyId, not(emptyString()));
assertThat(apiKeyEncoded, not(emptyString()));
if (randomBoolean()) {
doTestUpdateApiKey(apiKeyName, apiKeyId, apiKeyEncoded, null);
} else {
doTestUpdateApiKeyUsingBulkAction(apiKeyName, apiKeyId, apiKeyEncoded, null);
}
}
@SuppressWarnings({ "unchecked" })
public void testGrantorCannotUpdateApiKeyOfGrantTarget() throws IOException {
final var request = new Request("POST", "_security/api_key/grant");
final var apiKeyName = "test_api_key_password";
final Map<String, Object> requestBody = Map.ofEntries(
Map.entry("grant_type", "password"),
Map.entry("username", MANAGE_OWN_API_KEY_USER),
Map.entry("password", END_USER_PASSWORD.toString()),
Map.entry("api_key", Map.of("name", apiKeyName))
);
request.setJsonEntity(XContentTestUtils.convertToXContent(requestBody, XContentType.JSON).utf8ToString());
final Response response = adminClient().performRequest(request);
final Map<String, Object> createApiKeyResponseMap = responseAsMap(response); // keys: id, name, api_key, encoded
final var apiKeyId = (String) createApiKeyResponseMap.get("id");
final var apiKeyEncoded = (String) createApiKeyResponseMap.get("encoded"); // Base64(id:api_key)
assertThat(apiKeyId, not(emptyString()));
assertThat(apiKeyEncoded, not(emptyString()));
final var updateApiKeyRequest = new Request("PUT", "_security/api_key/" + apiKeyId);
updateApiKeyRequest.setJsonEntity(XContentTestUtils.convertToXContent(Map.of(), XContentType.JSON).utf8ToString());
final ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(updateApiKeyRequest));
assertEquals(404, e.getResponse().getStatusLine().getStatusCode());
assertThat(e.getMessage(), containsString("no API key owned by requesting user found for ID [" + apiKeyId + "]"));
// Bulk update also not allowed
final var bulkUpdateApiKeyRequest = new Request("POST", "_security/api_key/_bulk_update");
bulkUpdateApiKeyRequest.setJsonEntity(
XContentTestUtils.convertToXContent(Map.of("ids", List.of(apiKeyId)), XContentType.JSON).utf8ToString()
);
final Response bulkUpdateApiKeyResponse = adminClient().performRequest(bulkUpdateApiKeyRequest);
assertOK(bulkUpdateApiKeyResponse);
final Map<String, Object> bulkUpdateApiKeyResponseMap = responseAsMap(bulkUpdateApiKeyResponse);
assertThat((List<String>) bulkUpdateApiKeyResponseMap.get("updated"), empty());
assertThat((List<String>) bulkUpdateApiKeyResponseMap.get("noops"), empty());
final Map<String, Object> errors = (Map<String, Object>) bulkUpdateApiKeyResponseMap.get("errors");
assertEquals(1, errors.get("count"));
final Map<String, Map<String, Object>> errorDetails = (Map<String, Map<String, Object>>) errors.get("details");
assertEquals(1, errorDetails.size());
expectErrorFields(
"resource_not_found_exception",
"no API key owned by requesting user found for ID [" + apiKeyId + "]",
errorDetails.get(apiKeyId)
);
}
public void testGetPrivilegesForApiKeyWorksIfItDoesNotHaveAssignedPrivileges() throws IOException {
final Request createApiKeyRequest = new Request("POST", "_security/api_key");
if (randomBoolean()) {
createApiKeyRequest.setJsonEntity("""
{ "name": "k1" }""");
} else {
createApiKeyRequest.setJsonEntity("""
{
"name": "k1",
"role_descriptors": { }
}""");
}
final Response createApiKeyResponse = adminClient().performRequest(createApiKeyRequest);
assertOK(createApiKeyResponse);
final Request getPrivilegesRequest = new Request("GET", "_security/user/_privileges");
getPrivilegesRequest.setOptions(
RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + responseAsMap(createApiKeyResponse).get("encoded"))
);
final Response getPrivilegesResponse = client().performRequest(getPrivilegesRequest);
assertOK(getPrivilegesResponse);
assertThat(responseAsMap(getPrivilegesResponse), equalTo(XContentHelper.convertToMap(JsonXContent.jsonXContent, """
{
"cluster": [
"all"
],
"global": [],
"indices": [
{
"names": [
"*"
],
"privileges": [
"all"
],
"allow_restricted_indices": true
}
],
"applications": [
{
"application": "*",
"privileges": [
"*"
],
"resources": [
"*"
]
}
],
"run_as": [
"*"
]
}""", false)));
}
// Verifies the Get User Privileges API rejects (HTTP 400) an API key that DOES
// have assigned role descriptors, directing callers to the Get API key info API
// instead (the intersection of assigned and limited-by privileges cannot be
// represented in the user-privileges response format).
public void testGetPrivilegesForApiKeyThrows400IfItHasAssignedPrivileges() throws IOException {
    final Request createApiKeyRequest = new Request("POST", "_security/api_key");
    createApiKeyRequest.setJsonEntity("""
        {
          "name": "k1",
          "role_descriptors": { "a": { "cluster": ["monitor"] } }
        }""");
    final Response createApiKeyResponse = adminClient().performRequest(createApiKeyRequest);
    assertOK(createApiKeyResponse);
    // Authenticate as the key itself; the request is expected to fail.
    final Request getPrivilegesRequest = new Request("GET", "_security/user/_privileges");
    getPrivilegesRequest.setOptions(
        RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + responseAsMap(createApiKeyResponse).get("encoded"))
    );
    final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(getPrivilegesRequest));
    assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400));
    assertThat(
        e.getMessage(),
        containsString(
            "Cannot retrieve privileges for API keys with assigned role descriptors. "
                + "Please use the Get API key information API https://ela.st/es-api-get-api-key"
        )
    );
}
// Exercises "remote_indices" role-descriptor support across the create, grant,
// update and bulk-update API key endpoints. Each request randomly includes or
// omits the remote_indices section; sendRequestWithRemoteIndices asserts the
// section round-trips (second arg = expectNoRemoteIndices).
public void testRemoteIndicesSupportForApiKeys() throws IOException {
    createUser(REMOTE_PERMISSIONS_USER, END_USER_PASSWORD, List.of("remote_indices_role"));
    createRole("remote_indices_role", Set.of("grant_api_key", "manage_own_api_key"), "remote");
    final String remoteIndicesSection = """
        "remote_indices": [
          {
            "names": ["index-a", "*"],
            "privileges": ["read"],
            "clusters": ["remote-a", "*"]
          }
        ]""";
    final Request createApiKeyRequest = new Request("POST", "_security/api_key");
    final boolean includeRemoteIndices = randomBoolean();
    createApiKeyRequest.setJsonEntity(Strings.format("""
        {"name": "k1", "role_descriptors": {"r1": {%s}}}""", includeRemoteIndices ? remoteIndicesSection : ""));
    Response response = sendRequestWithRemoteIndices(createApiKeyRequest, false == includeRemoteIndices);
    String apiKeyId = ObjectPath.createFromResponse(response).evaluate("id");
    assertThat(apiKeyId, notNullValue());
    assertOK(response);
    // Grant flow: the granted user differs depending on whether remote
    // privileges are requested (the grantee needs matching permissions).
    final Request grantApiKeyRequest = new Request("POST", "_security/api_key/grant");
    grantApiKeyRequest.setJsonEntity(
        Strings.format(
            """
                {
                  "grant_type":"password",
                  "username":"%s",
                  "password":"end-user-password",
                  "api_key":{
                    "name":"k1",
                    "role_descriptors":{
                      "r1":{
                        %s
                      }
                    }
                  }
                }""",
            includeRemoteIndices ? MANAGE_OWN_API_KEY_USER : REMOTE_PERMISSIONS_USER,
            includeRemoteIndices ? remoteIndicesSection : ""
        )
    );
    response = sendRequestWithRemoteIndices(grantApiKeyRequest, false == includeRemoteIndices);
    final String updatedRemoteIndicesSection = """
        "remote_indices": [
          {
            "names": ["index-b", "index-a"],
            "privileges": ["read"],
            "clusters": ["remote-a", "remote-b"]
          }
        ]""";
    final Request updateApiKeyRequest = new Request("PUT", "_security/api_key/" + apiKeyId);
    updateApiKeyRequest.setJsonEntity(Strings.format("""
        {
          "role_descriptors": {
            "r1": {
              %s
            }
          }
        }""", includeRemoteIndices ? updatedRemoteIndicesSection : ""));
    response = sendRequestWithRemoteIndices(updateApiKeyRequest, false == includeRemoteIndices);
    // An update only reports "updated": true when the descriptors actually changed.
    assertThat(ObjectPath.createFromResponse(response).evaluate("updated"), equalTo(includeRemoteIndices));
    final String bulkUpdatedRemoteIndicesSection = """
        "remote_indices": [
          {
            "names": ["index-c"],
            "privileges": ["read"],
            "clusters": ["remote-a", "remote-c"]
          }
        ]""";
    final Request bulkUpdateApiKeyRequest = new Request("POST", "_security/api_key/_bulk_update");
    bulkUpdateApiKeyRequest.setJsonEntity(Strings.format("""
        {
          "ids": ["%s"],
          "role_descriptors": {
            "r1": {
              %s
            }
          }
        }""", apiKeyId, includeRemoteIndices ? bulkUpdatedRemoteIndicesSection : ""));
    response = sendRequestWithRemoteIndices(bulkUpdateApiKeyRequest, false == includeRemoteIndices);
    if (includeRemoteIndices) {
        assertThat(ObjectPath.createFromResponse(response).evaluate("updated"), contains(apiKeyId));
    } else {
        // Re-sending identical (empty) descriptors is a no-op, not an update.
        assertThat(ObjectPath.createFromResponse(response).evaluate("noops"), contains(apiKeyId));
    }
    deleteUser(REMOTE_PERMISSIONS_USER);
    deleteRole("remote_indices_role");
}
// Exercises "remote_cluster" role-descriptor support across create, update and
// grant API key endpoints, covering both an admin owner (no remote_cluster in
// limited-by roles) and a remote-permissions owner (remote_cluster present in
// limited-by). assertAPIKeyWithRemoteClusterPermissions checks each outcome.
public void testRemoteClusterSupportForApiKeys() throws IOException {
    createUser(REMOTE_PERMISSIONS_USER, END_USER_PASSWORD, List.of("remote_cluster_role"));
    createRole("remote_cluster_role", Set.of("grant_api_key", "manage_own_api_key"), "remote");
    final String remoteClusterSectionTemplate = """
        "remote_cluster": [
          {
            "privileges": ["monitor_enrich"],
            "clusters": [%s]
          }
        ]""";
    String remoteClusterSection = Strings.format(remoteClusterSectionTemplate, "\"remote-a\", \"*\"");
    final Request createApiKeyRequest = new Request("POST", "_security/api_key");
    final boolean includeRemoteCluster = randomBoolean();
    createApiKeyRequest.setJsonEntity(Strings.format("""
        {"name": "k1", "role_descriptors": {"r1": {%s}}}""", includeRemoteCluster ? remoteClusterSection : ""));
    // create API key as the admin user which does not have any remote_cluster limited_by permissions
    Response response = sendRequestAsAdminUser(createApiKeyRequest);
    String apiKeyId = ObjectPath.createFromResponse(response).evaluate("id");
    assertThat(apiKeyId, notNullValue());
    assertOK(response);
    assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, false, null, null);
    // update that API key (as the admin user)
    Request updateApiKeyRequest = new Request("PUT", "_security/api_key/" + apiKeyId);
    remoteClusterSection = Strings.format(remoteClusterSectionTemplate, "\"foo\", \"bar\"");
    updateApiKeyRequest.setJsonEntity(Strings.format("""
        {"role_descriptors": {"r1": {%s}}}""", includeRemoteCluster ? remoteClusterSection : ""));
    response = sendRequestAsAdminUser(updateApiKeyRequest);
    assertThat(ObjectPath.createFromResponse(response).evaluate("updated"), equalTo(includeRemoteCluster));
    assertOK(response);
    assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, false, null, new String[] { "foo", "bar" });
    // create API key as the remote user which has all remote_cluster permissions via limited_by
    response = sendRequestAsRemoteUser(createApiKeyRequest);
    apiKeyId = ObjectPath.createFromResponse(response).evaluate("id");
    assertThat(apiKeyId, notNullValue());
    assertOK(response);
    assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, true, null, null);
    // update that API key (as the remote user)
    updateApiKeyRequest = new Request("PUT", "_security/api_key/" + apiKeyId);
    remoteClusterSection = Strings.format(remoteClusterSectionTemplate, "\"foo\", \"bar\"");
    updateApiKeyRequest.setJsonEntity(Strings.format("""
        {"role_descriptors": {"r1": {%s}}}""", includeRemoteCluster ? remoteClusterSection : ""));
    response = sendRequestAsRemoteUser(updateApiKeyRequest);
    assertThat(ObjectPath.createFromResponse(response).evaluate("updated"), equalTo(includeRemoteCluster));
    assertOK(response);
    assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, true, null, new String[] { "foo", "bar" });
    // reset the clusters to the original value and setup grant API key requests
    remoteClusterSection = Strings.format(remoteClusterSectionTemplate, "\"remote-a\", \"*\"");
    final Request grantApiKeyRequest = new Request("POST", "_security/api_key/grant");
    String getApiKeyRequestTemplate = """
        {
          "grant_type":"password",
          "username":"%s",
          "password":"end-user-password",
          "api_key":{
            "name":"k1",
            "role_descriptors":{
              "r1":{
                %s
              }
            }
          }
        }""";
    // grant API key as the remote user which does remote_cluster limited_by permissions
    grantApiKeyRequest.setJsonEntity(
        Strings.format(getApiKeyRequestTemplate, REMOTE_PERMISSIONS_USER, includeRemoteCluster ? remoteClusterSection : "")
    );
    response = sendRequestAsRemoteUser(grantApiKeyRequest);
    apiKeyId = ObjectPath.createFromResponse(response).evaluate("id");
    assertThat(apiKeyId, notNullValue());
    assertOK(response);
    assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, true, null, null);
    // grant API key as a different user which does not have remote_cluster limited_by permissions
    grantApiKeyRequest.setJsonEntity(
        Strings.format(getApiKeyRequestTemplate, MANAGE_OWN_API_KEY_USER, includeRemoteCluster ? remoteClusterSection : "")
    );
    response = sendRequestAsRemoteUser(grantApiKeyRequest);
    apiKeyId = ObjectPath.createFromResponse(response).evaluate("id");
    assertThat(apiKeyId, notNullValue());
    assertOK(response);
    assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, false, "manage_own_api_key_role", null);
    // clean up
    deleteUser(REMOTE_PERMISSIONS_USER);
    deleteRole("remote_cluster_role");
}
/**
 * Fetches the API key (with limited-by roles) and asserts the presence or
 * absence of "remote_cluster" permissions in both the assigned (base) role
 * descriptor "r1" and the owner's limited-by role.
 *
 * @param apiKeyId id of the key to inspect
 * @param hasRemoteClusterInBaseRole whether "r1" should carry remote_cluster permissions
 * @param hasRemoteClusterInLimitedByRole whether the limited-by role should carry them
 * @param limitedByRoleName limited-by role to inspect, or null for the default
 *        ("remote_cluster_role" / "_es_test_root" depending on the branch)
 * @param baseRoleClusters expected cluster aliases in "r1", or null for the default
 */
@SuppressWarnings("unchecked")
private void assertAPIKeyWithRemoteClusterPermissions(
    String apiKeyId,
    boolean hasRemoteClusterInBaseRole,
    boolean hasRemoteClusterInLimitedByRole,
    @Nullable String limitedByRoleName,
    @Nullable String[] baseRoleClusters
) throws IOException {
    Request getAPIKeyRequest = new Request("GET", String.format("_security/api_key?id=%s&with_limited_by=true", apiKeyId));
    Response response = sendRequestAsAdminUser(getAPIKeyRequest);
    // Only one key matches the id, so look at the first (and only) entry.
    Map<String, Map<String, ?>> root = ObjectPath.createFromResponse(response).evaluate("api_keys.0");
    if (hasRemoteClusterInBaseRole) {
        // explicit permissions
        baseRoleClusters = baseRoleClusters == null ? new String[] { "remote-a", "*" } : baseRoleClusters;
        Map<String, Map<String, ?>> roleDescriptors = (Map<String, Map<String, ?>>) root.get("role_descriptors");
        List<Map<String, List<String>>> remoteCluster = (List<Map<String, List<String>>>) roleDescriptors.get("r1")
            .get("remote_cluster");
        assertThat(remoteCluster.get(0).get("privileges"), containsInAnyOrder("monitor_enrich"));
        assertThat(remoteCluster.get(0).get("clusters"), containsInAnyOrder(baseRoleClusters));
    } else {
        // no explicit permissions
        Map<String, Map<String, ?>> roleDescriptors = (Map<String, Map<String, ?>>) root.get("role_descriptors");
        Map<String, List<String>> baseRole = (Map<String, List<String>>) roleDescriptors.get("r1");
        assertNotNull(baseRole);
        assertNull(baseRole.get("remote_cluster"));
    }
    if (hasRemoteClusterInLimitedByRole) {
        // has limited by permissions
        limitedByRoleName = limitedByRoleName == null ? "remote_cluster_role" : limitedByRoleName;
        List<Map<String, List<String>>> limitedBy = (List<Map<String, List<String>>>) root.get("limited_by");
        Map<String, Collection<?>> limitedByRole = (Map<String, Collection<?>>) limitedBy.get(0).get(limitedByRoleName);
        assertNotNull(limitedByRole);
        List<Map<String, List<String>>> remoteCluster = (List<Map<String, List<String>>>) limitedByRole.get("remote_cluster");
        assertThat(remoteCluster.get(0).get("privileges"), containsInAnyOrder("monitor_stats", "monitor_enrich"));
        assertThat(remoteCluster.get(0).get("clusters"), containsInAnyOrder("remote"));
    } else {
        // no limited by permissions
        limitedByRoleName = limitedByRoleName == null ? "_es_test_root" : limitedByRoleName;
        List<Map<String, List<String>>> limitedBy = (List<Map<String, List<String>>>) root.get("limited_by");
        Map<String, Collection<?>> limitedByRole = (Map<String, Collection<?>>) limitedBy.get(0).get(limitedByRoleName);
        assertNotNull(limitedByRole);
        assertNull(limitedByRole.get("remote_cluster"));
    }
}
/**
 * Verifies the Query API key endpoint distinguishes key "type": several
 * cross_cluster keys are created, then a battery of queries matching the "rest"
 * type must find nothing while equivalent queries matching "cross_cluster" must
 * find all of them. Finally a compound (type + metadata) query pins one key.
 */
@SuppressWarnings("unchecked")
public void testQueryCrossClusterApiKeysByType() throws IOException {
    final List<String> apiKeyIds = new ArrayList<>(3);
    for (int i = 0; i < randomIntBetween(3, 5); i++) {
        Request createRequest = new Request("POST", "/_security/cross_cluster/api_key");
        createRequest.setJsonEntity(Strings.format("""
            {
              "name": "test-cross-key-query-%d",
              "access": {
                "search": [
                  {
                    "names": [ "whatever" ]
                  }
                ]
              },
              "metadata": { "tag": %d, "label": "rest" }
            }""", i, i));
        setUserForRequest(createRequest, MANAGE_SECURITY_USER, END_USER_PASSWORD);
        ObjectPath createResponse = assertOKAndCreateObjectPath(client().performRequest(createRequest));
        apiKeyIds.add(createResponse.evaluate("id"));
    }
    // the "cross_cluster" keys are not "rest" type
    for (String restTypeQuery : List.of("""
        {"query": {"term": {"type": "rest" }}}""", """
        {"query": {"bool": {"must_not": {"term": {"type": "cross_cluster"}}}}}""", """
        {"query": {"simple_query_string": {"query": "re* rest -cross_cluster", "fields": ["ty*"]}}}""", """
        {"query": {"simple_query_string": {"query": "-cross*", "fields": ["type"]}}}""", """
        {"query": {"prefix": {"type": "re" }}}""", """
        {"query": {"wildcard": {"type": "r*t" }}}""", """
        {"query": {"range": {"type": {"gte": "raaa", "lte": "rzzz"}}}}""")) {
        Request queryRequest = new Request("GET", "/_security/_query/api_key");
        queryRequest.addParameter("with_limited_by", String.valueOf(randomBoolean()));
        queryRequest.setJsonEntity(restTypeQuery);
        setUserForRequest(queryRequest, MANAGE_API_KEY_USER, END_USER_PASSWORD);
        ObjectPath queryResponse = assertOKAndCreateObjectPath(client().performRequest(queryRequest));
        assertThat(queryResponse.evaluate("total"), is(0));
        assertThat(queryResponse.evaluate("count"), is(0));
        assertThat(queryResponse.evaluate("api_keys"), iterableWithSize(0));
    }
    // every query below should match exactly the cross_cluster keys created above
    for (String crossClusterTypeQuery : List.of("""
        {"query": {"term": {"type": "cross_cluster" }}}""", """
        {"query": {"bool": {"must_not": {"term": {"type": "rest"}}}}}""", """
        {"query": {"simple_query_string": {"query": "cro* cross_cluster -re*", "fields": ["ty*"]}}}""", """
        {"query": {"simple_query_string": {"query": "-re*", "fields": ["type"]}}}""", """
        {"query": {"prefix": {"type": "cro" }}}""", """
        {"query": {"wildcard": {"type": "*oss_*er" }}}""", """
        {"query": {"range": {"type": {"gte": "cross", "lte": "zzzz"}}}}""")) {
        Request queryRequest = new Request("GET", "/_security/_query/api_key");
        queryRequest.addParameter("with_limited_by", String.valueOf(randomBoolean()));
        queryRequest.setJsonEntity(crossClusterTypeQuery);
        setUserForRequest(queryRequest, MANAGE_API_KEY_USER, END_USER_PASSWORD);
        ObjectPath queryResponse = assertOKAndCreateObjectPath(client().performRequest(queryRequest));
        assertThat(queryResponse.evaluate("total"), is(apiKeyIds.size()));
        assertThat(queryResponse.evaluate("count"), is(apiKeyIds.size()));
        assertThat(queryResponse.evaluate("api_keys"), iterableWithSize(apiKeyIds.size()));
        Iterator<?> apiKeys = ((List<?>) queryResponse.evaluate("api_keys")).iterator();
        while (apiKeys.hasNext()) {
            assertThat(apiKeyIds, hasItem((String) ((Map<String, Object>) apiKeys.next()).get("id")));
        }
    }
    // compound query: type match AND metadata.tag == 2 should hit exactly one key
    final Request queryRequest = new Request("GET", "/_security/_query/api_key");
    queryRequest.addParameter("with_limited_by", String.valueOf(randomBoolean()));
    queryRequest.setJsonEntity("""
        {"query": {"bool": {"must": [{"term": {"type": "cross_cluster" }}, {"term": {"metadata.tag": 2}}]}}}""");
    setUserForRequest(queryRequest, MANAGE_API_KEY_USER, END_USER_PASSWORD);
    final ObjectPath queryResponse = assertOKAndCreateObjectPath(client().performRequest(queryRequest));
    assertThat(queryResponse.evaluate("total"), is(1));
    assertThat(queryResponse.evaluate("count"), is(1));
    assertThat(queryResponse.evaluate("api_keys.0.name"), is("test-cross-key-query-2"));
}
// Verifies API keys can be sorted on the "type" field: one rest key and one
// cross_cluster key are created, then sorted desc ("rest" first) and asc
// ("cross_cluster" first) via the Query API key endpoint.
public void testSortApiKeysByType() throws IOException {
    List<String> apiKeyIds = new ArrayList<>(2);
    // create regular api key
    EncodedApiKey encodedApiKey = createApiKey("test-rest-key", Map.of("tag", "rest"));
    apiKeyIds.add(encodedApiKey.id());
    // create cross-cluster key
    Request createRequest = new Request("POST", "/_security/cross_cluster/api_key");
    createRequest.setJsonEntity("""
        {
          "name": "test-cross-key",
          "access": {
            "search": [
              {
                "names": [ "whatever" ]
              }
            ]
          },
          "metadata": { "tag": "cross" }
        }""");
    setUserForRequest(createRequest, MANAGE_SECURITY_USER, END_USER_PASSWORD);
    ObjectPath createResponse = assertOKAndCreateObjectPath(client().performRequest(createRequest));
    apiKeyIds.add(createResponse.evaluate("id"));
    // desc sort all (2) keys - by type ("rest" > "cross_cluster" lexicographically)
    Request queryRequest = new Request("GET", "/_security/_query/api_key");
    queryRequest.addParameter("with_limited_by", String.valueOf(randomBoolean()));
    queryRequest.setJsonEntity("""
        {"sort":[{"type":{"order":"desc"}}]}""");
    setUserForRequest(queryRequest, MANAGE_API_KEY_USER, END_USER_PASSWORD);
    ObjectPath queryResponse = assertOKAndCreateObjectPath(client().performRequest(queryRequest));
    assertThat(queryResponse.evaluate("total"), is(2));
    assertThat(queryResponse.evaluate("count"), is(2));
    assertThat(queryResponse.evaluate("api_keys.0.id"), is(apiKeyIds.get(0)));
    assertThat(queryResponse.evaluate("api_keys.0.type"), is("rest"));
    assertThat(queryResponse.evaluate("api_keys.1.id"), is(apiKeyIds.get(1)));
    assertThat(queryResponse.evaluate("api_keys.1.type"), is("cross_cluster"));
    // asc sort all (2) keys - by type
    queryRequest = new Request("GET", "/_security/_query/api_key");
    queryRequest.addParameter("with_limited_by", String.valueOf(randomBoolean()));
    queryRequest.setJsonEntity("""
        {"sort":[{"type":{"order":"asc"}}]}""");
    setUserForRequest(queryRequest, MANAGE_API_KEY_USER, END_USER_PASSWORD);
    queryResponse = assertOKAndCreateObjectPath(client().performRequest(queryRequest));
    assertThat(queryResponse.evaluate("total"), is(2));
    assertThat(queryResponse.evaluate("count"), is(2));
    assertThat(queryResponse.evaluate("api_keys.0.id"), is(apiKeyIds.get(1)));
    assertThat(queryResponse.evaluate("api_keys.0.type"), is("cross_cluster"));
    assertThat(queryResponse.evaluate("api_keys.1.id"), is(apiKeyIds.get(0)));
    assertThat(queryResponse.evaluate("api_keys.1.type"), is("rest"));
}
public void testCreateCrossClusterApiKey() throws IOException {
final Request createRequest = new Request("POST", "/_security/cross_cluster/api_key");
createRequest.setJsonEntity("""
{
"name": "my-key",
"access": {
"search": [
{
"names": [ "metrics" ]
}
],
"replication": [
{
"names": [ "logs" ],
"allow_restricted_indices": true
}
]
},
"expiration": "7d",
"metadata": { "tag": "shared", "points": 0 }
}""");
setUserForRequest(createRequest, MANAGE_SECURITY_USER, END_USER_PASSWORD);
final ObjectPath createResponse = assertOKAndCreateObjectPath(client().performRequest(createRequest));
final String apiKeyId = createResponse.evaluate("id");
// Cross cluster API key cannot be used on the REST
|
ApiKeyRestIT
|
java
|
apache__dubbo
|
dubbo-demo/dubbo-demo-mcp-server/src/main/java/org/apache/dubbo/mcp/server/demo/demo/NestedDetail.java
|
{
"start": 857,
"end": 1471
}
|
class ____ {

    // Free-form description for this detail entry.
    private String detailInfo;
    // Numeric payload associated with this detail.
    private Double value;

    /** No-argument constructor. */
    public NestedDetail() {}

    /** @return the descriptive text, possibly null */
    public String getDetailInfo() {
        return this.detailInfo;
    }

    /** @param detailInfo descriptive text to store */
    public void setDetailInfo(String detailInfo) {
        this.detailInfo = detailInfo;
    }

    /** @return the numeric value, possibly null */
    public Double getValue() {
        return this.value;
    }

    /** @param value numeric value to store */
    public void setValue(Double value) {
        this.value = value;
    }

    /** Renders {@code NestedDetail{detailInfo='…', value=…}} exactly as before. */
    @Override
    public String toString() {
        StringBuilder repr = new StringBuilder("NestedDetail{");
        repr.append("detailInfo='").append(detailInfo).append('\'');
        repr.append(", value=").append(value);
        repr.append('}');
        return repr.toString();
    }
}
|
NestedDetail
|
java
|
elastic__elasticsearch
|
modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java
|
{
"start": 2405,
"end": 17257
}
|
class ____ extends AggregationMultiBucketAggregationTestCase<InternalAutoDateHistogram> {
/**
 * Builds a random {@link InternalAutoDateHistogram} whose buckets start at
 * {@code startingDate} and are evenly spaced by a random multiple (1-3) of the
 * rough estimated duration of the rounding at {@code roundingIndex}.
 */
protected InternalAutoDateHistogram createTestInstance(
    String name,
    Map<String, Object> metadata,
    InternalAggregations aggregations,
    long startingDate,
    RoundingInfo[] roundingInfos,
    int roundingIndex,
    DocValueFormat format
) {
    int nbBuckets = randomNumberOfBuckets();
    // The target may be smaller or larger than the actual bucket count.
    int targetBuckets = randomIntBetween(1, nbBuckets * 2 + 1);
    List<InternalAutoDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);
    long interval = randomIntBetween(1, 3);
    long intervalMillis = roundingInfos[roundingIndex].roughEstimateDurationMillis * interval;
    for (int i = 0; i < nbBuckets; i++) {
        long key = startingDate + (intervalMillis * i);
        buckets.add(i, new InternalAutoDateHistogram.Bucket(key, randomIntBetween(1, 100), format, aggregations));
    }
    BucketInfo bucketInfo = new BucketInfo(roundingInfos, roundingIndex, InternalAggregations.EMPTY);
    return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, metadata, randomNonNegativeLong());
}
/** This aggregation's tests exercise sampled reduction. */
@Override
protected boolean supportsSampling() {
    return true;
}
/**
 * Creates a random instance by delegating to the long-form factory with a
 * random rounding and a random start date up to 2050-01-01 (UTC).
 */
@Override
protected InternalAutoDateHistogram createTestInstance(String name, Map<String, Object> metadata, InternalAggregations aggregations) {
    RoundingInfo[] roundingInfos = AutoDateHistogramAggregationBuilder.buildRoundings(null, null);
    int roundingIndex = between(0, roundingInfos.length - 1);
    return createTestInstance(
        name,
        metadata,
        aggregations,
        randomLongBetween(0, utcMillis("2050-01-01")),
        roundingInfos,
        roundingIndex,
        randomDateDocValueFormat()
    );
}
/*
* This test was added to reproduce a bug where getAppropriateRounding was only ever using the first innerIntervals
* passed in, instead of using the interval associated with the loop.
*/
// Regression test: getAppropriateRounding must consult the innerIntervals of the
// rounding it advances TO, not only the first rounding's innerIntervals (the
// original bug). Expected result is index 2 (the hour-based rounding).
public void testGetAppropriateRoundingUsesCorrectIntervals() {
    RoundingInfo[] roundings = new RoundingInfo[6];
    ZoneId timeZone = ZoneOffset.UTC;
    // Since we pass 0 as the starting index to getAppropriateRounding, we'll also use
    // an innerInterval that is quite large, such that targetBuckets * roundings[i].getMaximumInnerInterval()
    // will be larger than the estimate.
    roundings[0] = new RoundingInfo(Rounding.DateTimeUnit.SECOND_OF_MINUTE, timeZone, 1000L, "s", 1000);
    roundings[1] = new RoundingInfo(Rounding.DateTimeUnit.MINUTE_OF_HOUR, timeZone, 60 * 1000L, "m", 1, 5, 10, 30);
    roundings[2] = new RoundingInfo(Rounding.DateTimeUnit.HOUR_OF_DAY, timeZone, 60 * 60 * 1000L, "h", 1, 3, 12);
    OffsetDateTime timestamp = Instant.parse("2018-01-01T00:00:01.000Z").atOffset(ZoneOffset.UTC);
    // We want to pass a roundingIdx of zero, because in order to reproduce this bug, we need the function
    // to increment the rounding (because the bug was that the function would not use the innerIntervals
    // from the new rounding.
    int result = InternalAutoDateHistogram.getAppropriateRounding(
        timestamp.toEpochSecond() * 1000,
        timestamp.plusDays(1).toEpochSecond() * 1000,
        0,
        roundings,
        25
    );
    assertThat(result, equalTo(2));
}
/**
 * Produces {@code size} shard-level results that share a rounding and format but
 * whose start dates occasionally diverge by minutes/hours/days, so reduction
 * must reconcile overlapping and disjoint ranges.
 */
@Override
protected BuilderAndToReduce<InternalAutoDateHistogram> randomResultsToReduce(String name, int size) {
    long startingDate = randomLongBetween(0, utcMillis("2050-01-01"));
    RoundingInfo[] roundingInfos = AutoDateHistogramAggregationBuilder.buildRoundings(null, null);
    int roundingIndex = between(0, roundingInfos.length - 1);
    DocValueFormat format = randomDateDocValueFormat();
    List<InternalAutoDateHistogram> result = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
        long thisResultStart = startingDate;
        // usually() keeps all shards aligned; otherwise shift by a random offset.
        thisResultStart += usually() ? 0 : randomFrom(TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS).toMillis(between(1, 10000));
        result.add(createTestInstance(name, null, InternalAggregations.EMPTY, thisResultStart, roundingInfos, roundingIndex, format));
    }
    return new BuilderAndToReduce<>(mock(AggregationBuilder.class), result);
}
/**
 * Recomputes, independently of the production reduce path, the interval and the
 * per-bucket doc counts that reducing {@code inputs} should produce, then
 * compares against {@code reduced}. The recomputation mirrors the algorithm:
 * estimate an inner interval from the total range, then shrink it until the
 * bucket count matches.
 */
@Override
protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAutoDateHistogram> inputs) {
    // NOTE(review): local is misspelled ("Conut"); harmless, rename candidate.
    int totalBucketConut = 0;
    long lowest = Long.MAX_VALUE;
    long highest = 0;
    // Find the overall [lowest, highest] key range and total input bucket count.
    for (InternalAutoDateHistogram histogram : inputs) {
        for (Histogram.Bucket bucket : histogram.getBuckets()) {
            long bucketKey = ((ZonedDateTime) bucket.getKey()).toInstant().toEpochMilli();
            if (bucketKey < lowest) {
                lowest = bucketKey;
            }
            if (bucketKey > highest) {
                highest = bucketKey;
            }
            totalBucketConut++;
        }
    }
    int roundingIndex = reduced.getBucketInfo().roundingIdx;
    RoundingInfo roundingInfo = AutoDateHistogramAggregationBuilder.buildRoundings(null, null)[roundingIndex];
    Rounding.Prepared prepared = totalBucketConut > 0 ? roundingInfo.rounding.prepare(lowest, highest) : null;
    long normalizedDuration = (highest - lowest) / roundingInfo.getRoughEstimateDurationMillis();
    int innerIntervalIndex = 0;
    /*
     * Guess the interval to use based on the roughly estimated
     * duration. It'll be accurate or it'll produce more buckets
     * than we need but it is quick.
     */
    if (normalizedDuration != 0) {
        for (int j = roundingInfo.innerIntervals.length - 1; j >= 0; j--) {
            int interval = roundingInfo.innerIntervals[j];
            if (normalizedDuration / interval < reduced.getBuckets().size()) {
                innerIntervalIndex = j;
            }
        }
    }
    /*
     * Next pick smaller intervals until we find the one that makes the right
     * number of buckets.
     */
    int innerIntervalToUse;
    if (totalBucketConut == 0) {
        // No buckets at all: the smallest inner interval is used by convention.
        innerIntervalToUse = roundingInfo.innerIntervals[0];
    } else {
        do {
            innerIntervalToUse = roundingInfo.innerIntervals[innerIntervalIndex];
            int bucketCountAtInterval = getBucketCount(lowest, highest, prepared, innerIntervalToUse);
            if (bucketCountAtInterval == reduced.getBuckets().size()) {
                break;
            }
            if (bucketCountAtInterval < reduced.getBuckets().size()) {
                // Overshot: back off to the previous (larger-count) interval.
                innerIntervalToUse = roundingInfo.innerIntervals[Math.max(0, innerIntervalIndex - 1)];
                break;
            }
        } while (++innerIntervalIndex < roundingInfo.innerIntervals.length);
    }
    assertThat(reduced.getInterval().toString(), equalTo(innerIntervalToUse + roundingInfo.unitAbbreviation));
    // Expected key -> doc count, built by sweeping the range in interval steps.
    Map<Instant, Long> expectedCounts = new TreeMap<>();
    if (totalBucketConut > 0) {
        long keyForBucket = prepared.round(lowest);
        while (keyForBucket <= prepared.round(highest)) {
            long nextKey = keyForBucket;
            for (int i = 0; i < innerIntervalToUse; i++) {
                nextKey = prepared.nextRoundingValue(nextKey);
            }
            Instant key = Instant.ofEpochMilli(keyForBucket);
            expectedCounts.put(key, 0L);
            // Iterate through the input buckets, and for each bucket, determine if it's inside
            // the range of the bucket in the outer loop. if it is, add the doc count to the total
            // for that bucket.
            for (InternalAutoDateHistogram histogram : inputs) {
                for (Histogram.Bucket bucket : histogram.getBuckets()) {
                    long roundedBucketKey = prepared.round(((ZonedDateTime) bucket.getKey()).toInstant().toEpochMilli());
                    long docCount = bucket.getDocCount();
                    if (roundedBucketKey >= keyForBucket && roundedBucketKey < nextKey) {
                        expectedCounts.compute(key, (k, oldValue) -> (oldValue == null ? 0 : oldValue) + docCount);
                    }
                }
            }
            keyForBucket = nextKey;
        }
        // If there is only a single bucket, and we haven't added it above, add a bucket with no documents.
        // this step is necessary because of the roundedBucketKey < keyForBucket + intervalInMillis above.
        if (prepared.round(lowest) == prepared.round(highest) && expectedCounts.isEmpty()) {
            expectedCounts.put(Instant.ofEpochMilli(prepared.round(lowest)), 0L);
        }
    }
    // pick out the actual reduced values to the make the assertion more readable
    Map<Instant, Long> actualCounts = new TreeMap<>();
    for (Histogram.Bucket bucket : reduced.getBuckets()) {
        actualCounts.compute(
            ((ZonedDateTime) bucket.getKey()).toInstant(),
            (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount()
        );
    }
    assertEquals(expectedCounts, actualCounts);
    DateHistogramInterval expectedInterval;
    if (reduced.getBuckets().size() == 1) {
        // A single bucket keeps whatever interval the reduce chose.
        expectedInterval = reduced.getInterval();
    } else {
        expectedInterval = new DateHistogramInterval(innerIntervalToUse + roundingInfo.unitAbbreviation);
    }
    assertThat(reduced.getInterval(), equalTo(expectedInterval));
}
/**
 * Counts how many buckets of {@code interval} rounding steps are needed to
 * cover [min, max) when starting from {@code prepared.round(min)}.
 */
private int getBucketCount(long min, long max, Rounding.Prepared prepared, int interval) {
    int bucketCount = 0;
    long key = prepared.round(min);
    while (key < max) {
        // Advance one bucket = `interval` consecutive rounding steps.
        for (int i = 0; i < interval; i++) {
            key = prepared.nextRoundingValue(key);
        }
        bucketCount++;
    }
    return bucketCount;
}
/**
 * Produces a copy of {@code instance} that differs in exactly one randomly
 * chosen aspect (name, buckets, rounding index, metadata, or inner interval),
 * used to verify equals/hashCode discriminate every field.
 */
@Override
protected InternalAutoDateHistogram mutateInstance(InternalAutoDateHistogram instance) {
    String name = instance.getName();
    List<InternalAutoDateHistogram.Bucket> buckets = instance.getBuckets();
    int targetBuckets = instance.getTargetBuckets();
    BucketInfo bucketInfo = instance.getBucketInfo();
    Map<String, Object> metadata = instance.getMetadata();
    long interval = instance.getBucketInnerInterval();
    switch (between(0, 4)) {
        case 0 -> name += randomAlphaOfLength(5);
        case 1 -> {
            // Copy before mutating so the original instance is untouched.
            buckets = new ArrayList<>(buckets);
            buckets.add(
                new InternalAutoDateHistogram.Bucket(
                    randomNonNegativeLong(),
                    randomIntBetween(1, 100),
                    instance.getFormatter(),
                    InternalAggregations.EMPTY
                )
            );
        }
        case 2 -> {
            // Advance the rounding index, wrapping around at the end.
            int roundingIdx = bucketInfo.roundingIdx == bucketInfo.roundingInfos.length - 1 ? 0 : bucketInfo.roundingIdx + 1;
            bucketInfo = new BucketInfo(bucketInfo.roundingInfos, roundingIdx, bucketInfo.emptySubAggregations);
        }
        case 3 -> {
            if (metadata == null) {
                metadata = Maps.newMapWithExpectedSize(1);
            } else {
                metadata = new HashMap<>(instance.getMetadata());
            }
            metadata.put(randomAlphaOfLength(15), randomInt());
        }
        case 4 -> interval = randomNonNegativeLong();
        default -> throw new AssertionError("Illegal randomisation branch");
    }
    return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, instance.getFormatter(), metadata, interval);
}
// Two shards with overlapping second-resolution buckets reduce into the union
// of keys, with the shared 00:00:03 bucket's counts summed.
public void testReduceSecond() {
    InternalAutoDateHistogram h = new ReduceTestBuilder(10).bucket("1970-01-01T00:00:01", 1)
        .bucket("1970-01-01T00:00:02", 1)
        .bucket("1970-01-01T00:00:03", 1)
        .finishShardResult("s", 1)
        .bucket("1970-01-01T00:00:03", 1)
        .bucket("1970-01-01T00:00:04", 1)
        .finishShardResult("s", 1)
        .reduce();
    assertThat(
        keys(h),
        equalTo(Arrays.asList("1970-01-01T00:00:01Z", "1970-01-01T00:00:02Z", "1970-01-01T00:00:03Z", "1970-01-01T00:00:04Z"))
    );
    assertThat(docCounts(h), equalTo(Arrays.asList(1, 1, 2, 1)));
}
// Reduction at 30-second granularity fills the gap at 00:01:30 with an empty
// bucket so the key sequence is contiguous.
public void testReduceThirtySeconds() {
    InternalAutoDateHistogram h = new ReduceTestBuilder(10).bucket("1970-01-01T00:00:00", 1)
        .bucket("1970-01-01T00:00:30", 1)
        .bucket("1970-01-01T00:02:00", 1)
        .finishShardResult("s", 1)
        .bucket("1970-01-01T00:00:30", 1)
        .bucket("1970-01-01T00:01:00", 1)
        .finishShardResult("s", 1)
        .reduce();
    assertThat(
        keys(h),
        equalTo(
            Arrays.asList(
                "1970-01-01T00:00:00Z",
                "1970-01-01T00:00:30Z",
                "1970-01-01T00:01:00Z",
                "1970-01-01T00:01:30Z",
                "1970-01-01T00:02:00Z"
            )
        )
    );
    assertThat(docCounts(h), equalTo(Arrays.asList(1, 2, 1, 0, 1)));
}
// When one shard used a larger inner interval (5s), the reduce bumps to that
// interval to stay within the target bucket count, merging 00:00-00:04 into one.
public void testReduceBumpsInnerRange() {
    InternalAutoDateHistogram h = new ReduceTestBuilder(2).bucket("1970-01-01T00:00:01", 1)
        .bucket("1970-01-01T00:00:02", 1)
        .finishShardResult("s", 1)
        .bucket("1970-01-01T00:00:00", 1)
        .bucket("1970-01-01T00:00:05", 1)
        .finishShardResult("s", 5)
        .reduce();
    assertThat(keys(h), equalTo(Arrays.asList("1970-01-01T00:00:00Z", "1970-01-01T00:00:05Z")));
    assertThat(docCounts(h), equalTo(Arrays.asList(3, 1)));
}
// When one shard already rounded to minutes, the reduce adopts the coarser
// rounding unit, collapsing the second-resolution buckets into minute buckets.
public void testReduceBumpsRounding() {
    InternalAutoDateHistogram h = new ReduceTestBuilder(2).bucket("1970-01-01T00:00:01", 1)
        .bucket("1970-01-01T00:00:02", 1)
        .finishShardResult("s", 1)
        .bucket("1970-01-01T00:00:00", 1)
        .bucket("1970-01-01T00:01:00", 1)
        .finishShardResult("m", 1)
        .reduce();
    assertThat(keys(h), equalTo(Arrays.asList("1970-01-01T00:00:00Z", "1970-01-01T00:01:00Z")));
    assertThat(docCounts(h), equalTo(Arrays.asList(3, 1)));
}
private static
|
InternalAutoDateHistogramTests
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/http/TestHttpResponse.java
|
{
"start": 699,
"end": 1683
}
|
/**
 * Minimal in-memory {@link HttpResponse} used by HTTP-layer tests: it captures
 * the status, body and any headers added to it so assertions can inspect them.
 * Not thread-safe; intended for single-threaded test use.
 */
class ____ implements HttpResponse {

    private final RestStatus status;
    private final BytesReference content;
    // Header name -> all values added under that name, in insertion order.
    private final Map<String, List<String>> headers = new HashMap<>();

    TestHttpResponse(RestStatus status, BytesReference content) {
        this.status = status;
        this.content = content;
    }

    /** @return the response body this instance was constructed with */
    public BytesReference content() {
        return content;
    }

    /** @return the HTTP status this instance was constructed with */
    public RestStatus status() {
        return status;
    }

    /** @return live view of the recorded headers (callers may mutate it) */
    public Map<String, List<String>> headers() {
        return headers;
    }

    @Override
    public void addHeader(String name, String value) {
        // computeIfAbsent replaces the original containsKey/get/put dance:
        // creates the value list on first use of a header name, then appends.
        headers.computeIfAbsent(name, k -> new ArrayList<>()).add(value);
    }

    @Override
    public boolean containsHeader(String name) {
        return headers.containsKey(name);
    }
}
|
TestHttpResponse
|
java
|
apache__flink
|
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerHandler.java
|
{
"start": 8840,
"end": 14917
}
|
class ____<REQ extends MessageBody, RESP extends MessageBody>
implements Runnable {
private final AbstractServerHandler<REQ, RESP> handler;
private final ChannelHandlerContext ctx;
private final long requestId;
private final REQ request;
private final KvStateRequestStats stats;
private final long creationNanos;
/**
 * Creates a task that asynchronously serves a single client request.
 *
 * @param handler server handler that performs the actual request processing
 * @param ctx channel context used to write the (possibly chunked) response
 * @param requestId id echoed back to the client to correlate the response
 * @param request the deserialized request body
 * @param stats sink for request statistics
 */
AsyncRequestTask(
        final AbstractServerHandler<REQ, RESP> handler,
        final ChannelHandlerContext ctx,
        final long requestId,
        final REQ request,
        final KvStateRequestStats stats) {
    this.handler = Preconditions.checkNotNull(handler);
    this.ctx = Preconditions.checkNotNull(ctx);
    this.requestId = requestId;
    this.request = Preconditions.checkNotNull(request);
    this.stats = Preconditions.checkNotNull(stats);
    // Creation timestamp, presumably read for latency accounting by the write
    // listener — the consumer is not visible in this chunk; confirm before relying.
    this.creationNanos = System.nanoTime();
}
@Override
public void run() {
if (!ctx.channel().isActive()) {
return;
}
handler.handleRequest(requestId, request)
.whenComplete(
(resp, throwable) -> {
try {
if (throwable != null) {
throw throwable instanceof CompletionException
? throwable.getCause()
: throwable;
}
if (resp == null) {
throw new BadRequestException(
handler.getServerName(),
"NULL returned for request with ID "
+ requestId
+ ".");
}
final ByteBuf serialResp =
MessageSerializer.serializeResponse(
ctx.alloc(), requestId, resp);
int highWatermark =
ctx.channel().config().getWriteBufferHighWaterMark();
ChannelFuture write;
if (serialResp.readableBytes() <= highWatermark) {
write = ctx.writeAndFlush(serialResp);
} else {
write =
ctx.writeAndFlush(
new ChunkedByteBuf(
serialResp, highWatermark));
}
write.addListener(new RequestWriteListener());
} catch (BadRequestException e) {
LOG.debug("Bad request (request ID = {})", requestId, e);
try {
stats.reportFailedRequest();
final ByteBuf err =
MessageSerializer.serializeRequestFailure(
ctx.alloc(), requestId, e);
ctx.writeAndFlush(err);
} catch (IOException io) {
LOG.error(
"Failed to respond with the error after failed request",
io);
}
} catch (Throwable t) {
LOG.error(
"Error while handling request with ID {}",
requestId,
t);
try {
stats.reportFailedRequest();
final String errMsg =
"Failed request "
+ requestId
+ "."
+ System.lineSeparator()
+ " Caused by: "
+ ExceptionUtils.stringifyException(t);
final ByteBuf err =
MessageSerializer.serializeRequestFailure(
ctx.alloc(),
requestId,
new RuntimeException(errMsg));
ctx.writeAndFlush(err);
} catch (IOException io) {
LOG.error(
"Failed to respond with the error after failed request",
io);
}
}
});
}
@Override
public String toString() {
return "AsyncRequestTask{" + "requestId=" + requestId + ", request=" + request + '}';
}
/**
* Callback after query result has been written.
*
* <p>Gathers stats and logs errors.
*/
private
|
AsyncRequestTask
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
|
{
"start": 1898,
"end": 1986
}
|
class ____ and runs external SPS service.
*/
@InterfaceAudience.Private
public final
|
starts
|
java
|
elastic__elasticsearch
|
x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java
|
{
"start": 2073,
"end": 10266
}
|
class ____ extends ESSingleNodeTestCase {
private ClusterService clusterService;
private SamlServiceProviderIndex serviceProviderIndex;
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return List.of(LocalStateCompositeXPackPlugin.class, IdentityProviderPlugin.class, IndexTemplateRegistryPlugin.class);
}
@Override
protected Settings nodeSettings() {
return Settings.builder()
.put(IDP_ENTITY_ID.getKey(), "urn:idp:org")
.put(IDP_SSO_REDIRECT_ENDPOINT.getKey(), "https://idp.org/sso/init")
.put(super.nodeSettings())
.build();
}
@Before
public void setupComponents() throws Exception {
clusterService = super.getInstanceFromNode(ClusterService.class);
serviceProviderIndex = new SamlServiceProviderIndex(client(), clusterService);
}
@After
public void deleteTemplateAndIndex() {
indicesAdmin().delete(new DeleteIndexRequest(SamlServiceProviderIndex.INDEX_NAME + "*")).actionGet();
indicesAdmin().prepareDeleteTemplate(SamlServiceProviderIndex.TEMPLATE_NAME).get();
serviceProviderIndex.close();
}
public void testWriteAndFindServiceProvidersFromIndex() {
final int count = randomIntBetween(3, 5);
List<SamlServiceProviderDocument> documents = new ArrayList<>(count);
// Index should not exist yet
assertThat(clusterService.state().metadata().getProject().index(SamlServiceProviderIndex.INDEX_NAME), nullValue());
for (int i = 0; i < count; i++) {
final SamlServiceProviderDocument doc = randomDocument(i);
writeDocument(doc);
documents.add(doc);
}
final IndexMetadata indexMetadata = clusterService.state().metadata().getProject().index(SamlServiceProviderIndex.INDEX_NAME);
assertThat(indexMetadata, notNullValue());
assertThat(indexMetadata.getSettings().get("index.format"), equalTo("1"));
assertThat(indexMetadata.getAliases().size(), equalTo(1));
assertThat(indexMetadata.getAliases().keySet().toArray(), arrayContainingInAnyOrder(SamlServiceProviderIndex.ALIAS_NAME));
refresh();
final Set<SamlServiceProviderDocument> allDocs = getAllDocs();
assertThat(allDocs, iterableWithSize(count));
for (SamlServiceProviderDocument doc : documents) {
assertThat(allDocs, hasItem(Matchers.equalTo(doc)));
}
final SamlServiceProviderDocument readDoc = randomFrom(documents);
assertThat(readDocument(readDoc.docId), equalTo(readDoc));
final SamlServiceProviderDocument findDoc = randomFrom(documents);
assertThat(findByEntityId(findDoc.entityId), equalTo(findDoc));
final SamlServiceProviderDocument deleteDoc = randomFrom(documents);
final DeleteResponse deleteResponse = deleteDocument(deleteDoc);
assertThat(deleteResponse.getId(), equalTo(deleteDoc.docId));
assertThat(deleteResponse.getResult(), equalTo(DocWriteResponse.Result.DELETED));
refresh();
assertThat(readDocument(deleteDoc.docId), nullValue());
assertThat(findAllByEntityId(deleteDoc.entityId), emptyIterable());
}
public void testWritesViaAliasIfItExists() {
// Create an index that will trigger the template, but isn't the standard index name
final String customIndexName = SamlServiceProviderIndex.INDEX_NAME + "-test";
indicesAdmin().create(new CreateIndexRequest(customIndexName)).actionGet();
final IndexMetadata indexMetadata = clusterService.state().metadata().getProject().index(customIndexName);
assertThat(indexMetadata, notNullValue());
assertThat(indexMetadata.getSettings().get("index.format"), equalTo("1"));
assertThat(indexMetadata.getAliases().size(), equalTo(1));
assertThat(indexMetadata.getAliases().keySet().toArray(), arrayContainingInAnyOrder(SamlServiceProviderIndex.ALIAS_NAME));
SamlServiceProviderDocument document = randomDocument(1);
writeDocument(document);
// Index should not exist because we created an alternate index, and the alias points to that.
assertThat(clusterService.state().metadata().getProject().index(SamlServiceProviderIndex.INDEX_NAME), nullValue());
refresh();
final Set<SamlServiceProviderDocument> allDocs = getAllDocs();
assertThat(allDocs, iterableWithSize(1));
assertThat(allDocs, hasItem(Matchers.equalTo(document)));
assertThat(readDocument(document.docId), equalTo(document));
}
private Set<SamlServiceProviderDocument> getAllDocs() {
final PlainActionFuture<Set<SamlServiceProviderDocument>> future = new PlainActionFuture<>();
serviceProviderIndex.findAll(
assertListenerIsOnlyCalledOnce(
future.delegateFailureAndWrap(
(f, set) -> f.onResponse(set.stream().map(doc -> doc.document.get()).collect(Collectors.toUnmodifiableSet()))
)
)
);
return future.actionGet();
}
private SamlServiceProviderDocument readDocument(String docId) {
final PlainActionFuture<SamlServiceProviderIndex.DocumentSupplier> future = new PlainActionFuture<>();
serviceProviderIndex.readDocument(docId, assertListenerIsOnlyCalledOnce(future));
final SamlServiceProviderIndex.DocumentSupplier supplier = future.actionGet();
return supplier == null ? null : supplier.getDocument();
}
private void writeDocument(SamlServiceProviderDocument doc) {
final PlainActionFuture<DocWriteResponse> future = new PlainActionFuture<>();
serviceProviderIndex.writeDocument(
doc,
DocWriteRequest.OpType.INDEX,
WriteRequest.RefreshPolicy.WAIT_UNTIL,
assertListenerIsOnlyCalledOnce(future)
);
doc.setDocId(future.actionGet().getId());
}
private DeleteResponse deleteDocument(SamlServiceProviderDocument doc) {
final PlainActionFuture<DeleteResponse> future = new PlainActionFuture<>();
serviceProviderIndex.readDocument(
doc.docId,
assertListenerIsOnlyCalledOnce(
future.delegateFailureAndWrap(
(f, info) -> serviceProviderIndex.deleteDocument(info.version, WriteRequest.RefreshPolicy.IMMEDIATE, f)
)
)
);
return future.actionGet();
}
private SamlServiceProviderDocument findByEntityId(String entityId) {
final Set<SamlServiceProviderDocument> docs = findAllByEntityId(entityId);
assertThat(docs, iterableWithSize(1));
return docs.iterator().next();
}
private Set<SamlServiceProviderDocument> findAllByEntityId(String entityId) {
final PlainActionFuture<Set<SamlServiceProviderDocument>> future = new PlainActionFuture<>();
serviceProviderIndex.findByEntityId(
entityId,
assertListenerIsOnlyCalledOnce(
future.delegateFailureAndWrap(
(f, set) -> f.onResponse(set.stream().map(doc -> doc.document.get()).collect(Collectors.toUnmodifiableSet()))
)
)
);
return future.actionGet();
}
private void refresh() {
PlainActionFuture<Void> future = new PlainActionFuture<>();
serviceProviderIndex.refresh(assertListenerIsOnlyCalledOnce(future));
future.actionGet();
}
private static <T> ActionListener<T> assertListenerIsOnlyCalledOnce(ActionListener<T> delegate) {
final AtomicInteger callCount = new AtomicInteger(0);
return ActionListener.runBefore(delegate, () -> {
if (callCount.incrementAndGet() != 1) {
fail("Listener was called twice");
}
});
}
// Since we just want to test the template handling in this test suite, we don't need to go through
// all the hassle of the setup required to *actually* enable the plugin (we do that elsewhere), we
// just need to make sure the template registry is here.
public static
|
SamlServiceProviderIndexTests
|
java
|
apache__maven
|
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng2591MergeInheritedPluginConfigTest.java
|
{
"start": 1134,
"end": 3703
}
|
class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test aggregation of list configuration items for build plugins when using 'combine.children=append' attribute.
* This variation of the test does not employ profiles.
*
* @throws Exception in case of failure
*/
@Test
public void testitNoProfile() throws Exception {
testit("no-profile");
}
/**
* Test aggregation of list configuration items for build plugins when using 'combine.children=append' attribute.
* This variation of the test employs active profiles which however are empty and should not change the result.
*
* @throws Exception in case of failure
*/
@Test
public void testitWithProfile() throws Exception {
testit("with-profile");
}
private void testit(String project) throws Exception {
File testDir = extractResources("/mng-2591/" + project);
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("subproject/target");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
Properties props = verifier.loadProperties("subproject/target/config.properties");
assertEquals("8", props.getProperty("stringParams"));
assertEquals("PARENT-1", props.getProperty("stringParams.0"));
assertEquals("PARENT-3", props.getProperty("stringParams.1"));
assertEquals("PARENT-2", props.getProperty("stringParams.2"));
assertEquals("PARENT-4", props.getProperty("stringParams.3"));
assertEquals("CHILD-1", props.getProperty("stringParams.4"));
assertEquals("CHILD-3", props.getProperty("stringParams.5"));
assertEquals("CHILD-2", props.getProperty("stringParams.6"));
assertEquals("CHILD-4", props.getProperty("stringParams.7"));
assertEquals("8", props.getProperty("listParam"));
assertEquals("PARENT-1", props.getProperty("listParam.0"));
assertEquals("PARENT-3", props.getProperty("listParam.1"));
assertEquals("PARENT-2", props.getProperty("listParam.2"));
assertEquals("PARENT-4", props.getProperty("listParam.3"));
assertEquals("CHILD-1", props.getProperty("listParam.4"));
assertEquals("CHILD-3", props.getProperty("listParam.5"));
assertEquals("CHILD-2", props.getProperty("listParam.6"));
assertEquals("CHILD-4", props.getProperty("listParam.7"));
}
}
|
MavenITmng2591MergeInheritedPluginConfigTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java
|
{
"start": 15936,
"end": 16985
}
|
class ____ {
@Nullable // if cleared
private ArrayList<Runnable> checks = new ArrayList<>();
synchronized void clear() {
checks = null;
}
synchronized void add(Runnable check) {
if (checks != null) {
checks.add(check);
}
}
void runAll() {
// It's ok not to run all the checks so there's no need for a completely synchronized iteration.
final int count;
synchronized (this) {
if (checks == null) {
return;
}
count = checks.size();
}
for (int i = 0; i < count; i++) {
final Runnable cancellationCheck;
synchronized (this) {
if (checks == null) {
return;
}
cancellationCheck = checks.get(i);
}
cancellationCheck.run();
}
}
}
}
|
CancellationChecks
|
java
|
google__gson
|
gson/src/test/java/com/google/gson/common/TestTypes.java
|
{
"start": 2230,
"end": 2475
}
|
class ____ {
public static final String FIELD_KEY = "base";
public final Collection<Base> base;
public ClassWithBaseCollectionField(Collection<Base> base) {
this.base = base;
}
}
public static
|
ClassWithBaseCollectionField
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/valuehandlingmode/inline/SubQueryTest.java
|
{
"start": 5538,
"end": 6283
}
|
class ____ {
@Id
@GeneratedValue
Long id;
@ManyToOne(fetch = FetchType.LAZY)
Patient patient;
String description;
public Task() {
}
public Task(Patient patient) {
this.patient = patient;
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( o == null || getClass() != o.getClass() ) {
return false;
}
Task task = (Task) o;
return id.equals( task.id );
}
@Override
public int hashCode() {
return Objects.hash( id );
}
@Override
public String toString() {
return String.format( "Task(id: %d; description: %s)", id, description == null ? "null" : description );
}
}
@Entity(name = "Patient")
@Table(name = "Patient")
public static
|
Task
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/codec/vectors/diskbbq/next/ESNextDiskBBQVectorsFormatTests.java
|
{
"start": 2855,
"end": 18101
}
|
class ____ extends BaseKnnVectorsFormatTestCase {
static {
LogConfigurator.loadLog4jPlugins();
LogConfigurator.configureESLogging(); // native access requires logging to be initialized
}
KnnVectorsFormat format;
@Before
@Override
public void setUp() throws Exception {
ESNextDiskBBQVectorsFormat.QuantEncoding encoding = ESNextDiskBBQVectorsFormat.QuantEncoding.values()[random().nextInt(
ESNextDiskBBQVectorsFormat.QuantEncoding.values().length
)];
if (rarely()) {
format = new ESNextDiskBBQVectorsFormat(
encoding,
random().nextInt(2 * MIN_VECTORS_PER_CLUSTER, ESNextDiskBBQVectorsFormat.MAX_VECTORS_PER_CLUSTER),
random().nextInt(8, ESNextDiskBBQVectorsFormat.MAX_CENTROIDS_PER_PARENT_CLUSTER)
);
} else {
// run with low numbers to force many clusters with parents
format = new ESNextDiskBBQVectorsFormat(
encoding,
random().nextInt(MIN_VECTORS_PER_CLUSTER, 2 * MIN_VECTORS_PER_CLUSTER),
random().nextInt(MIN_CENTROIDS_PER_PARENT_CLUSTER, 8)
);
}
super.setUp();
}
@Override
protected VectorSimilarityFunction randomSimilarity() {
return RandomPicks.randomFrom(
random(),
List.of(
VectorSimilarityFunction.DOT_PRODUCT,
VectorSimilarityFunction.EUCLIDEAN,
VectorSimilarityFunction.MAXIMUM_INNER_PRODUCT
)
);
}
@Override
protected VectorEncoding randomVectorEncoding() {
return VectorEncoding.FLOAT32;
}
@Override
public void testSearchWithVisitedLimit() {
// ivf doesn't enforce visitation limit
}
@Override
protected Codec getCodec() {
return TestUtil.alwaysKnnVectorsFormat(format);
}
@Override
protected void assertOffHeapByteSize(LeafReader r, String fieldName) throws IOException {
var fieldInfo = r.getFieldInfos().fieldInfo(fieldName);
if (r instanceof CodecReader codecReader) {
KnnVectorsReader knnVectorsReader = codecReader.getVectorReader();
if (knnVectorsReader instanceof PerFieldKnnVectorsFormat.FieldsReader fieldsReader) {
knnVectorsReader = fieldsReader.getFieldReader(fieldName);
}
var offHeap = knnVectorsReader.getOffHeapByteSize(fieldInfo);
long totalByteSize = offHeap.values().stream().mapToLong(Long::longValue).sum();
assertThat(offHeap.size(), equalTo(3));
assertThat(totalByteSize, equalTo(offHeap.values().stream().mapToLong(Long::longValue).sum()));
} else {
throw new AssertionError("unexpected:" + r.getClass());
}
}
@Override
public void testAdvance() throws Exception {
// TODO re-enable with hierarchical IVF, clustering as it is is flaky
}
public void testToString() {
FilterCodec customCodec = new FilterCodec("foo", Codec.getDefault()) {
@Override
public KnnVectorsFormat knnVectorsFormat() {
return new ESNextDiskBBQVectorsFormat(128, 4);
}
};
String expectedPattern = "ESNextDiskBBQVectorsFormat(vectorPerCluster=128)";
var defaultScorer = format(Locale.ROOT, expectedPattern, "DefaultFlatVectorScorer");
var memSegScorer = format(Locale.ROOT, expectedPattern, "Lucene99MemorySegmentFlatVectorsScorer");
assertThat(customCodec.knnVectorsFormat().toString(), is(oneOf(defaultScorer, memSegScorer)));
}
public void testLimits() {
expectThrows(IllegalArgumentException.class, () -> new ESNextDiskBBQVectorsFormat(MIN_VECTORS_PER_CLUSTER - 1, 16));
expectThrows(IllegalArgumentException.class, () -> new ESNextDiskBBQVectorsFormat(MAX_VECTORS_PER_CLUSTER + 1, 16));
expectThrows(IllegalArgumentException.class, () -> new ESNextDiskBBQVectorsFormat(128, MIN_CENTROIDS_PER_PARENT_CLUSTER - 1));
expectThrows(IllegalArgumentException.class, () -> new ESNextDiskBBQVectorsFormat(128, MAX_CENTROIDS_PER_PARENT_CLUSTER + 1));
}
public void testSimpleOffHeapSize() throws IOException {
float[] vector = randomVector(random().nextInt(12, 500));
try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
Document doc = new Document();
doc.add(new KnnFloatVectorField("f", vector, VectorSimilarityFunction.EUCLIDEAN));
w.addDocument(doc);
w.commit();
try (IndexReader reader = DirectoryReader.open(w)) {
LeafReader r = getOnlyLeafReader(reader);
if (r instanceof CodecReader codecReader) {
KnnVectorsReader knnVectorsReader = codecReader.getVectorReader();
if (knnVectorsReader instanceof PerFieldKnnVectorsFormat.FieldsReader fieldsReader) {
knnVectorsReader = fieldsReader.getFieldReader("f");
}
var fieldInfo = r.getFieldInfos().fieldInfo("f");
var offHeap = knnVectorsReader.getOffHeapByteSize(fieldInfo);
assertEquals(3, offHeap.size());
}
}
}
}
public void testFewVectorManyTimes() throws IOException {
int numDifferentVectors = random().nextInt(1, 20);
float[][] vectors = new float[numDifferentVectors][];
int dimensions = random().nextInt(12, 500);
for (int i = 0; i < numDifferentVectors; i++) {
vectors[i] = randomVector(dimensions);
}
int numDocs = random().nextInt(100, 10_000);
try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
for (int i = 0; i < numDocs; i++) {
float[] vector = vectors[random().nextInt(numDifferentVectors)];
Document doc = new Document();
doc.add(new KnnFloatVectorField("f", vector, VectorSimilarityFunction.EUCLIDEAN));
w.addDocument(doc);
}
w.commit();
if (rarely()) {
w.forceMerge(1);
}
try (IndexReader reader = DirectoryReader.open(w)) {
List<LeafReaderContext> subReaders = reader.leaves();
for (LeafReaderContext r : subReaders) {
LeafReader leafReader = r.reader();
float[] vector = randomVector(dimensions);
TopDocs topDocs = leafReader.searchNearestVectors(
"f",
vector,
10,
AcceptDocs.fromLiveDocs(leafReader.getLiveDocs(), leafReader.maxDoc()),
Integer.MAX_VALUE
);
assertEquals(Math.min(leafReader.maxDoc(), 10), topDocs.scoreDocs.length);
}
}
}
}
public void testOneRepeatedVector() throws IOException {
int dimensions = random().nextInt(12, 500);
float[] repeatedVector = randomVector(dimensions);
int numDocs = random().nextInt(100, 10_000);
try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
for (int i = 0; i < numDocs; i++) {
float[] vector = random().nextInt(3) == 0 ? repeatedVector : randomVector(dimensions);
Document doc = new Document();
doc.add(new KnnFloatVectorField("f", vector, VectorSimilarityFunction.EUCLIDEAN));
w.addDocument(doc);
}
w.commit();
if (rarely()) {
w.forceMerge(1);
}
try (IndexReader reader = DirectoryReader.open(w)) {
List<LeafReaderContext> subReaders = reader.leaves();
for (LeafReaderContext r : subReaders) {
LeafReader leafReader = r.reader();
float[] vector = randomVector(dimensions);
TopDocs topDocs = leafReader.searchNearestVectors(
"f",
vector,
10,
AcceptDocs.fromLiveDocs(leafReader.getLiveDocs(), leafReader.maxDoc()),
Integer.MAX_VALUE
);
assertEquals(Math.min(leafReader.maxDoc(), 10), topDocs.scoreDocs.length);
}
}
}
}
// this is a modified version of lucene's TestSearchWithThreads test case
public void testWithThreads() throws Exception {
final int numThreads = random().nextInt(2, 5);
final int numSearches = atLeast(100);
final int numDocs = atLeast(1000);
final int dimensions = random().nextInt(12, 500);
try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
for (int docCount = 0; docCount < numDocs; docCount++) {
final Document doc = new Document();
doc.add(new KnnFloatVectorField("f", randomVector(dimensions), VectorSimilarityFunction.EUCLIDEAN));
w.addDocument(doc);
}
w.forceMerge(1);
try (IndexReader reader = DirectoryReader.open(w)) {
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[numThreads];
for (int threadID = 0; threadID < numThreads; threadID++) {
threads[threadID] = new Thread(() -> {
try {
long totSearch = 0;
for (; totSearch < numSearches && failed.get() == false; totSearch++) {
float[] vector = randomVector(dimensions);
LeafReader leafReader = getOnlyLeafReader(reader);
leafReader.searchNearestVectors(
"f",
vector,
10,
AcceptDocs.fromLiveDocs(leafReader.getLiveDocs(), leafReader.maxDoc()),
Integer.MAX_VALUE
);
}
assertTrue(totSearch > 0);
} catch (Exception exc) {
failed.set(true);
throw new RuntimeException(exc);
}
});
threads[threadID].setDaemon(true);
}
for (Thread t : threads) {
t.start();
}
for (Thread t : threads) {
t.join();
}
}
}
}
public void testRestrictiveFilterDense() throws IOException {
doRestrictiveFilter(true);
}
public void testRestrictiveFilterSparse() throws IOException {
doRestrictiveFilter(false);
}
private void doRestrictiveFilter(boolean dense) throws IOException {
int dimensions = random().nextInt(12, 500);
int maxMatchingDocs = random().nextInt(1, 10);
int matchingDocs = 0;
int numDocs = random().nextInt(100, 3_000);
try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
if (dense || rarely() == false) {
float[] vector = randomVector(dimensions);
doc.add(new KnnFloatVectorField("f", vector, VectorSimilarityFunction.EUCLIDEAN));
}
doc.add(new KeywordField("k", new BytesRef("B"), Field.Store.NO));
w.addDocument(doc);
if (matchingDocs < maxMatchingDocs && rarely()) {
matchingDocs++;
doc = new Document();
doc.add(new KnnFloatVectorField("f", randomVector(dimensions), VectorSimilarityFunction.EUCLIDEAN));
doc.add(new KeywordField("k", new BytesRef("A"), Field.Store.NO));
w.addDocument(doc);
}
if (dense == false && rarely()) {
doc = new Document();
doc.add(new KeywordField("k", new BytesRef("A"), Field.Store.NO));
w.addDocument(doc);
}
}
if (matchingDocs == 0) {
// make sure we have at least one matching doc with a vector
matchingDocs++;
float[] vector = randomVector(dimensions);
Document doc = new Document();
doc.add(new KnnFloatVectorField("f", vector, VectorSimilarityFunction.EUCLIDEAN));
doc.add(new KeywordField("k", new BytesRef("A"), Field.Store.NO));
w.addDocument(doc);
}
w.commit();
// force one leave
w.forceMerge(1);
try (IndexReader reader = DirectoryReader.open(w)) {
LeafReader leafReader = getOnlyLeafReader(reader);
float[] vector = randomVector(dimensions);
// we might collect the same document twice because of soar assignments
KnnCollector collector;
if (random().nextBoolean()) {
collector = new TopKnnCollector(random().nextInt(2 * matchingDocs, 3 * matchingDocs), Integer.MAX_VALUE);
} else {
collector = new TopKnnCollector(
random().nextInt(2 * matchingDocs, 3 * matchingDocs),
Integer.MAX_VALUE,
new IVFKnnSearchStrategy(0.25f, null)
);
}
leafReader.searchNearestVectors(
"f",
vector,
collector,
AcceptDocs.fromIteratorSupplier(
() -> leafReader.postings(new Term("k", new BytesRef("A"))),
leafReader.getLiveDocs(),
leafReader.maxDoc()
)
);
TopDocs topDocs = collector.topDocs();
Set<Integer> uniqueDocIds = new HashSet<>();
for (int i = 0; i < topDocs.scoreDocs.length; i++) {
uniqueDocIds.add(topDocs.scoreDocs[i].doc);
}
assertEquals(matchingDocs, uniqueDocIds.size());
// match no docs
leafReader.searchNearestVectors(
"f",
vector,
new TopKnnCollector(2, Integer.MAX_VALUE),
AcceptDocs.fromIteratorSupplier(DocIdSetIterator::empty, leafReader.getLiveDocs(), leafReader.maxDoc())
);
}
}
}
}
|
ESNextDiskBBQVectorsFormatTests
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/factory/annotation/AutowiredAnnotationBeanPostProcessorTests.java
|
{
"start": 146596,
"end": 147055
}
|
class ____ {
private TestBean testBean;
private Map<String, TestBean> testBeanMap;
@Autowired(required = false)
public void setTestBeanMap(TestBean testBean, Map<String, TestBean> testBeanMap) {
this.testBean = testBean;
this.testBeanMap = testBeanMap;
}
public TestBean getTestBean() {
return this.testBean;
}
public Map<String, TestBean> getTestBeanMap() {
return this.testBeanMap;
}
}
public static
|
MapMethodInjectionBean
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Recall.java
|
{
"start": 2441,
"end": 7192
}
|
class ____ implements EvaluationMetric {
public static final ParseField NAME = new ParseField("recall");
private static final String AGG_NAME_PREFIX = "classification_recall_";
static final String BY_ACTUAL_CLASS_AGG_NAME = AGG_NAME_PREFIX + "by_actual_class";
static final String PER_ACTUAL_CLASS_RECALL_AGG_NAME = AGG_NAME_PREFIX + "per_actual_class_recall";
static final String AVG_RECALL_AGG_NAME = AGG_NAME_PREFIX + "avg_recall";
private static final ObjectParser<Recall, Void> PARSER = new ObjectParser<>(NAME.getPreferredName(), true, Recall::new);
public static Recall fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
private static final int MAX_CLASSES_CARDINALITY = 1000;
private final SetOnce<String> actualField = new SetOnce<>();
private final SetOnce<Result> result = new SetOnce<>();
public Recall() {}
public Recall(StreamInput in) throws IOException {}
@Override
public String getWriteableName() {
return registeredMetricName(Classification.NAME, NAME);
}
@Override
public String getName() {
return NAME.getPreferredName();
}
@Override
public Set<String> getRequiredFields() {
return Sets.newHashSet(EvaluationFields.ACTUAL_FIELD.getPreferredName(), EvaluationFields.PREDICTED_FIELD.getPreferredName());
}
@Override
public final Tuple<List<AggregationBuilder>, List<PipelineAggregationBuilder>> aggs(
EvaluationParameters parameters,
EvaluationFields fields
) {
String actualFieldName = fields.getActualField();
String predictedField = fields.getPredictedField();
// Store given {@code actualField} for the purpose of generating error message in {@code process}.
this.actualField.trySet(actualFieldName);
if (result.get() != null) {
return Tuple.tuple(List.of(), List.of());
}
Script script = PainlessScripts.buildIsEqualScript(actualFieldName, predictedField);
return Tuple.tuple(
List.of(
AggregationBuilders.terms(BY_ACTUAL_CLASS_AGG_NAME)
.field(actualFieldName)
.order(List.of(BucketOrder.count(false), BucketOrder.key(true)))
.size(MAX_CLASSES_CARDINALITY)
.subAggregation(AggregationBuilders.avg(PER_ACTUAL_CLASS_RECALL_AGG_NAME).script(script))
),
List.of(
PipelineAggregatorBuilders.avgBucket(AVG_RECALL_AGG_NAME, BY_ACTUAL_CLASS_AGG_NAME + ">" + PER_ACTUAL_CLASS_RECALL_AGG_NAME)
)
);
}
@Override
public void process(InternalAggregations aggs) {
final Aggregation byClass = aggs.get(BY_ACTUAL_CLASS_AGG_NAME);
final Aggregation avgRecall = aggs.get(AVG_RECALL_AGG_NAME);
if (result.get() == null
&& byClass instanceof Terms byActualClassAgg
&& avgRecall instanceof NumericMetricsAggregation.SingleValue avgRecallAgg) {
if (byActualClassAgg.getSumOfOtherDocCounts() > 0) {
// This means there were more than {@code MAX_CLASSES_CARDINALITY} buckets.
// We cannot calculate average recall accurately, so we fail.
throw ExceptionsHelper.badRequestException(
"Cannot calculate average recall. Cardinality of field [{}] is too high",
actualField.get()
);
}
List<PerClassSingleValue> classes = new ArrayList<>(byActualClassAgg.getBuckets().size());
for (Terms.Bucket bucket : byActualClassAgg.getBuckets()) {
String className = bucket.getKeyAsString();
NumericMetricsAggregation.SingleValue recallAgg = bucket.getAggregations().get(PER_ACTUAL_CLASS_RECALL_AGG_NAME);
classes.add(new PerClassSingleValue(className, recallAgg.value()));
}
result.set(new Result(classes, avgRecallAgg.value()));
}
}
@Override
public Optional<Result> getResult() {
return Optional.ofNullable(result.get());
}
@Override
public void writeTo(StreamOutput out) throws IOException {}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hashCode(NAME.getPreferredName());
}
public static
|
Recall
|
java
|
apache__logging-log4j2
|
log4j-to-slf4j/src/test/java/org/apache/logging/slf4j/MDCTestHelper.java
|
{
"start": 926,
"end": 1297
}
|
class ____ {
static MDCAdapter replaceMDCAdapter(final MDCAdapter adapter) throws Exception {
Field mdcAdapterField = MDC.class.getDeclaredField("MDC_ADAPTER");
mdcAdapterField.setAccessible(true);
final MDCAdapter old = (MDCAdapter) mdcAdapterField.get(null);
mdcAdapterField.set(null, adapter);
return old;
}
}
|
MDCTestHelper
|
java
|
netty__netty
|
transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollDatagramChannel.java
|
{
"start": 2316,
"end": 19852
}
|
class ____ extends AbstractEpollChannel implements DatagramChannel {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(EpollDatagramChannel.class);
private static final boolean IP_MULTICAST_ALL =
SystemPropertyUtil.getBoolean("io.netty.channel.epoll.ipMulticastAll", false);
private static final ChannelMetadata METADATA = new ChannelMetadata(true, 16);
private static final String EXPECTED_TYPES =
" (expected: " + StringUtil.simpleClassName(DatagramPacket.class) + ", " +
StringUtil.simpleClassName(AddressedEnvelope.class) + '<' +
StringUtil.simpleClassName(ByteBuf.class) + ", " +
StringUtil.simpleClassName(InetSocketAddress.class) + ">, " +
StringUtil.simpleClassName(ByteBuf.class) + ')';
private final EpollDatagramChannelConfig config;
private volatile boolean connected;
static {
if (logger.isDebugEnabled()) {
logger.debug("-Dio.netty.channel.epoll.ipMulticastAll: {}", IP_MULTICAST_ALL);
}
}
/**
* Returns {@code true} if {@link io.netty.channel.unix.SegmentedDatagramPacket} is supported natively.
*
* @return {@code true} if supported, {@code false} otherwise.
*/
public static boolean isSegmentedDatagramPacketSupported() {
return Epoll.isAvailable() &&
// We only support it together with sendmmsg(...)
Native.IS_SUPPORTING_SENDMMSG && Native.IS_SUPPORTING_UDP_SEGMENT;
}
/**
* Create a new instance which selects the {@link SocketProtocolFamily} to use depending
* on the Operation Systems default which will be chosen.
*/
public EpollDatagramChannel() {
this((SocketProtocolFamily) null);
}
/**
* Create a new instance using the given {@link InternetProtocolFamily}. If {@code null} is used it will depend
* on the Operation Systems default which will be chosen.
*
* @deprecated use {@link EpollDatagramChannel#EpollDatagramChannel(SocketProtocolFamily)}
*/
@Deprecated
public EpollDatagramChannel(InternetProtocolFamily family) {
this(newSocketDgram(family), false);
}
/**
* Create a new instance using the given {@link SocketProtocolFamily}. If {@code null} is used it will depend
* on the Operation Systems default which will be chosen.
*/
public EpollDatagramChannel(SocketProtocolFamily family) {
this(newSocketDgram(family), false);
}
/**
* Create a new instance which selects the {@link SocketProtocolFamily} to use depending
* on the Operation Systems default which will be chosen.
*/
public EpollDatagramChannel(int fd) {
this(new LinuxSocket(fd), true);
}
private EpollDatagramChannel(LinuxSocket fd, boolean active) {
super(null, fd, active, EpollIoOps.valueOf(0));
// Configure IP_MULTICAST_ALL - disable by default to match the behaviour of NIO.
try {
fd.setIpMulticastAll(IP_MULTICAST_ALL);
} catch (IOException e) {
logger.debug("Failed to set IP_MULTICAST_ALL to {}", IP_MULTICAST_ALL, e);
}
config = new EpollDatagramChannelConfig(this);
}
@Override
public InetSocketAddress remoteAddress() {
return (InetSocketAddress) super.remoteAddress();
}
@Override
public InetSocketAddress localAddress() {
return (InetSocketAddress) super.localAddress();
}
@Override
public ChannelMetadata metadata() {
return METADATA;
}
@Override
public boolean isActive() {
return socket.isOpen() && (config.getActiveOnOpen() && isRegistered() || active);
}
@Override
public boolean isConnected() {
return connected;
}
@Override
public ChannelFuture joinGroup(InetAddress multicastAddress) {
return joinGroup(multicastAddress, newPromise());
}
@Override
public ChannelFuture joinGroup(InetAddress multicastAddress, ChannelPromise promise) {
try {
NetworkInterface iface = config().getNetworkInterface();
if (iface == null) {
iface = NetworkInterface.getByInetAddress(localAddress().getAddress());
}
return joinGroup(multicastAddress, iface, null, promise);
} catch (IOException e) {
promise.setFailure(e);
}
return promise;
}
@Override
public ChannelFuture joinGroup(
InetSocketAddress multicastAddress, NetworkInterface networkInterface) {
return joinGroup(multicastAddress, networkInterface, newPromise());
}
@Override
public ChannelFuture joinGroup(
InetSocketAddress multicastAddress, NetworkInterface networkInterface,
ChannelPromise promise) {
return joinGroup(multicastAddress.getAddress(), networkInterface, null, promise);
}
@Override
public ChannelFuture joinGroup(
InetAddress multicastAddress, NetworkInterface networkInterface, InetAddress source) {
return joinGroup(multicastAddress, networkInterface, source, newPromise());
}
@Override
public ChannelFuture joinGroup(
final InetAddress multicastAddress, final NetworkInterface networkInterface,
final InetAddress source, final ChannelPromise promise) {
ObjectUtil.checkNotNull(multicastAddress, "multicastAddress");
ObjectUtil.checkNotNull(networkInterface, "networkInterface");
if (eventLoop().inEventLoop()) {
joinGroup0(multicastAddress, networkInterface, source, promise);
} else {
eventLoop().execute(new Runnable() {
@Override
public void run() {
joinGroup0(multicastAddress, networkInterface, source, promise);
}
});
}
return promise;
}
private void joinGroup0(
final InetAddress multicastAddress, final NetworkInterface networkInterface,
final InetAddress source, final ChannelPromise promise) {
assert eventLoop().inEventLoop();
try {
socket.joinGroup(multicastAddress, networkInterface, source);
promise.setSuccess();
} catch (IOException e) {
promise.setFailure(e);
}
}
@Override
public ChannelFuture leaveGroup(InetAddress multicastAddress) {
return leaveGroup(multicastAddress, newPromise());
}
@Override
public ChannelFuture leaveGroup(InetAddress multicastAddress, ChannelPromise promise) {
try {
return leaveGroup(
multicastAddress, NetworkInterface.getByInetAddress(localAddress().getAddress()), null, promise);
} catch (IOException e) {
promise.setFailure(e);
}
return promise;
}
@Override
public ChannelFuture leaveGroup(
InetSocketAddress multicastAddress, NetworkInterface networkInterface) {
return leaveGroup(multicastAddress, networkInterface, newPromise());
}
@Override
public ChannelFuture leaveGroup(
InetSocketAddress multicastAddress,
NetworkInterface networkInterface, ChannelPromise promise) {
return leaveGroup(multicastAddress.getAddress(), networkInterface, null, promise);
}
@Override
public ChannelFuture leaveGroup(
InetAddress multicastAddress, NetworkInterface networkInterface, InetAddress source) {
return leaveGroup(multicastAddress, networkInterface, source, newPromise());
}
@Override
public ChannelFuture leaveGroup(
final InetAddress multicastAddress, final NetworkInterface networkInterface, final InetAddress source,
final ChannelPromise promise) {
ObjectUtil.checkNotNull(multicastAddress, "multicastAddress");
ObjectUtil.checkNotNull(networkInterface, "networkInterface");
if (eventLoop().inEventLoop()) {
leaveGroup0(multicastAddress, networkInterface, source, promise);
} else {
eventLoop().execute(new Runnable() {
@Override
public void run() {
leaveGroup0(multicastAddress, networkInterface, source, promise);
}
});
}
return promise;
}
private void leaveGroup0(
final InetAddress multicastAddress, final NetworkInterface networkInterface, final InetAddress source,
final ChannelPromise promise) {
assert eventLoop().inEventLoop();
try {
socket.leaveGroup(multicastAddress, networkInterface, source);
promise.setSuccess();
} catch (IOException e) {
promise.setFailure(e);
}
}
@Override
public ChannelFuture block(
InetAddress multicastAddress, NetworkInterface networkInterface,
InetAddress sourceToBlock) {
return block(multicastAddress, networkInterface, sourceToBlock, newPromise());
}
@Override
public ChannelFuture block(
final InetAddress multicastAddress, final NetworkInterface networkInterface,
final InetAddress sourceToBlock, final ChannelPromise promise) {
ObjectUtil.checkNotNull(multicastAddress, "multicastAddress");
ObjectUtil.checkNotNull(sourceToBlock, "sourceToBlock");
ObjectUtil.checkNotNull(networkInterface, "networkInterface");
promise.setFailure(new UnsupportedOperationException("Multicast block not supported"));
return promise;
}
@Override
public ChannelFuture block(InetAddress multicastAddress, InetAddress sourceToBlock) {
return block(multicastAddress, sourceToBlock, newPromise());
}
@Override
public ChannelFuture block(
InetAddress multicastAddress, InetAddress sourceToBlock, ChannelPromise promise) {
try {
return block(
multicastAddress,
NetworkInterface.getByInetAddress(localAddress().getAddress()),
sourceToBlock, promise);
} catch (Throwable e) {
promise.setFailure(e);
}
return promise;
}
@Override
protected AbstractEpollUnsafe newUnsafe() {
return new EpollDatagramChannelUnsafe();
}
@Override
protected void doBind(SocketAddress localAddress) throws Exception {
if (localAddress instanceof InetSocketAddress) {
InetSocketAddress socketAddress = (InetSocketAddress) localAddress;
if (socketAddress.getAddress().isAnyLocalAddress() &&
socketAddress.getAddress() instanceof Inet4Address) {
if (socket.family() == SocketProtocolFamily.INET6) {
localAddress = new InetSocketAddress(Native.INET6_ANY, socketAddress.getPort());
}
}
}
super.doBind(localAddress);
active = true;
}
@Override
protected void doWrite(ChannelOutboundBuffer in) throws Exception {
int maxMessagesPerWrite = maxMessagesPerWrite();
while (maxMessagesPerWrite > 0) {
Object msg = in.current();
if (msg == null) {
// Wrote all messages.
break;
}
try {
// Check if sendmmsg(...) is supported which is only the case for GLIBC 2.14+
if (Native.IS_SUPPORTING_SENDMMSG && in.size() > 1 ||
// We only handle UDP_SEGMENT in sendmmsg.
in.current() instanceof io.netty.channel.unix.SegmentedDatagramPacket) {
NativeDatagramPacketArray array = cleanDatagramPacketArray();
array.add(in, isConnected(), maxMessagesPerWrite);
int cnt = array.count();
if (cnt >= 1) {
// Try to use gathering writes via sendmmsg(...) syscall.
int offset = 0;
NativeDatagramPacketArray.NativeDatagramPacket[] packets = array.packets();
int send = socket.sendmmsg(packets, offset, cnt);
if (send == 0) {
// Did not write all messages.
break;
}
for (int i = 0; i < send; i++) {
in.remove();
}
maxMessagesPerWrite -= send;
continue;
}
}
boolean done = false;
for (int i = config().getWriteSpinCount(); i > 0; --i) {
if (doWriteMessage(msg)) {
done = true;
break;
}
}
if (done) {
in.remove();
maxMessagesPerWrite --;
} else {
break;
}
} catch (IOException e) {
maxMessagesPerWrite --;
// Continue on write error as a DatagramChannel can write to multiple remote peers
//
// See https://github.com/netty/netty/issues/2665
in.remove(e);
}
}
if (in.isEmpty()) {
// Did write all messages.
clearFlag(Native.EPOLLOUT);
} else {
// Did not write all messages.
setFlag(Native.EPOLLOUT);
}
}
private boolean doWriteMessage(Object msg) throws Exception {
final ByteBuf data;
final InetSocketAddress remoteAddress;
if (msg instanceof AddressedEnvelope) {
@SuppressWarnings("unchecked")
AddressedEnvelope<ByteBuf, InetSocketAddress> envelope =
(AddressedEnvelope<ByteBuf, InetSocketAddress>) msg;
data = envelope.content();
remoteAddress = envelope.recipient();
} else {
data = (ByteBuf) msg;
remoteAddress = null;
}
final int dataLen = data.readableBytes();
if (dataLen == 0) {
return true;
}
return doWriteOrSendBytes(data, remoteAddress, false) > 0;
}
private static void checkUnresolved(AddressedEnvelope<?, ?> envelope) {
if (envelope.recipient() instanceof InetSocketAddress
&& (((InetSocketAddress) envelope.recipient()).isUnresolved())) {
throw new UnresolvedAddressException();
}
}
@Override
protected Object filterOutboundMessage(Object msg) {
if (msg instanceof io.netty.channel.unix.SegmentedDatagramPacket) {
if (!Native.IS_SUPPORTING_UDP_SEGMENT) {
throw new UnsupportedOperationException(
"unsupported message type: " + StringUtil.simpleClassName(msg) + EXPECTED_TYPES);
}
io.netty.channel.unix.SegmentedDatagramPacket packet = (io.netty.channel.unix.SegmentedDatagramPacket) msg;
checkUnresolved(packet);
ByteBuf content = packet.content();
return UnixChannelUtil.isBufferCopyNeededForWrite(content) ?
packet.replace(newDirectBuffer(packet, content)) : msg;
}
if (msg instanceof DatagramPacket) {
DatagramPacket packet = (DatagramPacket) msg;
checkUnresolved(packet);
ByteBuf content = packet.content();
return UnixChannelUtil.isBufferCopyNeededForWrite(content) ?
new DatagramPacket(newDirectBuffer(packet, content), packet.recipient()) : msg;
}
if (msg instanceof ByteBuf) {
ByteBuf buf = (ByteBuf) msg;
return UnixChannelUtil.isBufferCopyNeededForWrite(buf)? newDirectBuffer(buf) : buf;
}
if (msg instanceof AddressedEnvelope) {
@SuppressWarnings("unchecked")
AddressedEnvelope<Object, SocketAddress> e = (AddressedEnvelope<Object, SocketAddress>) msg;
checkUnresolved(e);
if (e.content() instanceof ByteBuf &&
(e.recipient() == null || e.recipient() instanceof InetSocketAddress)) {
ByteBuf content = (ByteBuf) e.content();
return UnixChannelUtil.isBufferCopyNeededForWrite(content)?
new DefaultAddressedEnvelope<ByteBuf, InetSocketAddress>(
newDirectBuffer(e, content), (InetSocketAddress) e.recipient()) : e;
}
}
throw new UnsupportedOperationException(
"unsupported message type: " + StringUtil.simpleClassName(msg) + EXPECTED_TYPES);
}
@Override
public EpollDatagramChannelConfig config() {
return config;
}
@Override
protected void doDisconnect() throws Exception {
socket.disconnect();
connected = active = false;
resetCachedAddresses();
}
@Override
protected boolean doConnect(SocketAddress remoteAddress, SocketAddress localAddress) throws Exception {
if (super.doConnect(remoteAddress, localAddress)) {
connected = true;
return true;
}
return false;
}
@Override
protected void doClose() throws Exception {
super.doClose();
connected = false;
}
final
|
EpollDatagramChannel
|
java
|
spring-projects__spring-framework
|
spring-jms/src/main/java/org/springframework/jms/connection/SingleConnectionFactory.java
|
{
"start": 2832,
"end": 4004
}
|
class ____ JMS 2.0 {@code JMSContext}
* calls and therefore requires the JMS 2.0 API to be present at runtime.
* It may nevertheless run against a JMS 1.1 driver (bound to the JMS 2.0 API)
* as long as no actual JMS 2.0 calls are triggered by the application's setup.
*
* <p>Useful for testing and standalone environments in order to keep using the
* same Connection for multiple {@link org.springframework.jms.core.JmsTemplate}
* calls, without having a pooling ConnectionFactory underneath. This may span
* any number of transactions, even concurrently executing transactions.
*
* <p>Note that Spring's message listener containers support the use of
* a shared Connection within each listener container instance. Using
* SingleConnectionFactory in combination only really makes sense for
* sharing a single JMS Connection <i>across multiple listener containers</i>.
*
* @author Juergen Hoeller
* @author Mark Pollack
* @since 1.1
* @see org.springframework.jms.core.JmsTemplate
* @see org.springframework.jms.listener.SimpleMessageListenerContainer
* @see org.springframework.jms.listener.DefaultMessageListenerContainer#setCacheLevel
*/
public
|
supports
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-eks/src/test/java/org/apache/camel/component/aws2/eks/EKS2ComponentConfigurationTest.java
|
{
"start": 1158,
"end": 4655
}
|
class ____ extends CamelTestSupport {
@Test
public void createEndpointWithComponentElements() throws Exception {
EKS2Component component = context.getComponent("aws2-eks", EKS2Component.class);
component.getConfiguration().setAccessKey("XXX");
component.getConfiguration().setSecretKey("YYY");
EKS2Endpoint endpoint = (EKS2Endpoint) component.createEndpoint("aws2-eks://label");
assertEquals("XXX", endpoint.getConfiguration().getAccessKey());
assertEquals("YYY", endpoint.getConfiguration().getSecretKey());
}
@Test
public void createEndpointWithComponentAndEndpointElements() throws Exception {
EKS2Component component = context.getComponent("aws2-eks", EKS2Component.class);
component.getConfiguration().setAccessKey("XXX");
component.getConfiguration().setSecretKey("YYY");
component.getConfiguration().setRegion(Region.US_WEST_1.toString());
EKS2Endpoint endpoint
= (EKS2Endpoint) component.createEndpoint("aws2-eks://label?accessKey=xxxxxx&secretKey=yyyyy®ion=US_EAST_1");
assertEquals("xxxxxx", endpoint.getConfiguration().getAccessKey());
assertEquals("yyyyy", endpoint.getConfiguration().getSecretKey());
assertEquals("US_EAST_1", endpoint.getConfiguration().getRegion());
}
@Test
public void createEndpointWithComponentEndpointElementsAndProxy() throws Exception {
EKS2Component component = context.getComponent("aws2-eks", EKS2Component.class);
component.getConfiguration().setAccessKey("XXX");
component.getConfiguration().setSecretKey("YYY");
component.getConfiguration().setRegion(Region.US_WEST_1.toString());
EKS2Endpoint endpoint = (EKS2Endpoint) component
.createEndpoint(
"aws2-eks://label?accessKey=xxxxxx&secretKey=yyyyy®ion=US_EAST_1&proxyHost=localhost&proxyPort=9000&proxyProtocol=HTTP");
assertEquals("xxxxxx", endpoint.getConfiguration().getAccessKey());
assertEquals("yyyyy", endpoint.getConfiguration().getSecretKey());
assertEquals("US_EAST_1", endpoint.getConfiguration().getRegion());
assertEquals(Protocol.HTTP, endpoint.getConfiguration().getProxyProtocol());
assertEquals("localhost", endpoint.getConfiguration().getProxyHost());
assertEquals(Integer.valueOf(9000), endpoint.getConfiguration().getProxyPort());
}
@Test
public void createEndpointWithOverrideEndpoint() throws Exception {
EKS2Component component = context.getComponent("aws2-eks", EKS2Component.class);
component.getConfiguration().setAccessKey("XXX");
component.getConfiguration().setSecretKey("YYY");
component.getConfiguration().setRegion(Region.US_WEST_1.toString());
EKS2Endpoint endpoint
= (EKS2Endpoint) component.createEndpoint(
"aws2-eks://label?accessKey=xxxxxx&secretKey=yyyyy®ion=US_EAST_1&overrideEndpoint=true&uriEndpointOverride=http://localhost:9090");
assertEquals("xxxxxx", endpoint.getConfiguration().getAccessKey());
assertEquals("yyyyy", endpoint.getConfiguration().getSecretKey());
assertEquals("US_EAST_1", endpoint.getConfiguration().getRegion());
assertTrue(endpoint.getConfiguration().isOverrideEndpoint());
assertEquals("http://localhost:9090", endpoint.getConfiguration().getUriEndpointOverride());
}
}
|
EKS2ComponentConfigurationTest
|
java
|
mapstruct__mapstruct
|
processor/src/main/java/org/mapstruct/ap/internal/model/MappingBuilderContext.java
|
{
"start": 2923,
"end": 10289
}
|
interface ____ {
/**
* returns a parameter assignment
*
* @param mappingMethod target mapping method
* @param description the description source
* @param targetType return type to match
* @param formattingParameters used for formatting dates and numbers
* @param criteria parameters criteria in the selection process
* @param sourceRHS source information
* @param positionHint the mirror for reporting problems
* @param forger the supplier of the callback method to forge a method
*
* @return an assignment to a method parameter, which can either be:
* <ol>
* <li>MethodReference</li>
* <li>TypeConversion</li>
* <li>SourceRHS Assignment (empty TargetAssignment)</li>
* <li>null, no assignment found</li>
* </ol>
*/
Assignment getTargetAssignment(Method mappingMethod, ForgedMethodHistory description, Type targetType,
FormattingParameters formattingParameters,
SelectionCriteria criteria, SourceRHS sourceRHS,
AnnotationMirror positionHint,
Supplier<Assignment> forger);
Set<SupportingMappingMethod> getUsedSupportedMappings();
Set<Field> getUsedSupportedFields();
}
private final TypeFactory typeFactory;
private final ElementUtils elementUtils;
private final TypeUtils typeUtils;
private final FormattingMessager messager;
private final AccessorNamingUtils accessorNaming;
private final EnumMappingStrategy enumMappingStrategy;
private final Map<String, EnumTransformationStrategy> enumTransformationStrategies;
private final Options options;
private final TypeElement mapperTypeElement;
private final List<SourceMethod> sourceModel;
private final List<MapperReference> mapperReferences;
private final MappingResolver mappingResolver;
private final List<MappingMethod> mappingsToGenerate = new ArrayList<>();
private final Map<ForgedMethod, ForgedMethod> forgedMethodsUnderCreation =
new HashMap<>();
//CHECKSTYLE:OFF
public MappingBuilderContext(TypeFactory typeFactory,
ElementUtils elementUtils,
TypeUtils typeUtils,
FormattingMessager messager,
AccessorNamingUtils accessorNaming,
EnumMappingStrategy enumMappingStrategy,
Map<String, EnumTransformationStrategy> enumTransformationStrategies,
Options options,
MappingResolver mappingResolver,
TypeElement mapper,
List<SourceMethod> sourceModel,
List<MapperReference> mapperReferences) {
this.typeFactory = typeFactory;
this.elementUtils = elementUtils;
this.typeUtils = typeUtils;
this.messager = messager;
this.accessorNaming = accessorNaming;
this.enumMappingStrategy = enumMappingStrategy;
this.enumTransformationStrategies = enumTransformationStrategies;
this.options = options;
this.mappingResolver = mappingResolver;
this.mapperTypeElement = mapper;
this.sourceModel = sourceModel;
this.mapperReferences = mapperReferences;
}
//CHECKSTYLE:ON
/**
* Returns a map which is used to track which forged methods are under creation.
* Used for cutting the possible infinite recursion of forged method creation.
*
* Map is used instead of set because not all fields of ForgedMethods are used in equals/hashCode and we are
* interested only in the first created ForgedMethod
*
* @return map of forged methods
*/
public Map<ForgedMethod, ForgedMethod> getForgedMethodsUnderCreation() {
return forgedMethodsUnderCreation;
}
public TypeElement getMapperTypeElement() {
return mapperTypeElement;
}
public List<SourceMethod> getSourceModel() {
return sourceModel;
}
public List<MapperReference> getMapperReferences() {
return mapperReferences;
}
public TypeFactory getTypeFactory() {
return typeFactory;
}
public ElementUtils getElementUtils() {
return elementUtils;
}
public TypeUtils getTypeUtils() {
return typeUtils;
}
public FormattingMessager getMessager() {
return messager;
}
public AccessorNamingUtils getAccessorNaming() {
return accessorNaming;
}
public EnumMappingStrategy getEnumMappingStrategy() {
return enumMappingStrategy;
}
public Map<String, EnumTransformationStrategy> getEnumTransformationStrategies() {
return enumTransformationStrategies;
}
public Options getOptions() {
return options;
}
public MappingResolver getMappingResolver() {
return mappingResolver;
}
public List<MappingMethod> getMappingsToGenerate() {
return mappingsToGenerate;
}
public List<String> getReservedNames() {
Set<String> nameSet = new HashSet<>();
for ( MappingMethod method : mappingsToGenerate ) {
nameSet.add( method.getName() );
}
// add existing names
for ( SourceMethod method : sourceModel) {
if ( method.isAbstract() ) {
nameSet.add( method.getName() );
}
}
return new ArrayList<>( nameSet );
}
public MappingMethod getExistingMappingMethod(MappingMethod newMappingMethod) {
MappingMethod existingMappingMethod = null;
for ( MappingMethod mappingMethod : mappingsToGenerate ) {
if ( newMappingMethod.equals( mappingMethod ) ) {
existingMappingMethod = mappingMethod;
break;
}
}
return existingMappingMethod;
}
public Set<SupportingMappingMethod> getUsedSupportedMappings() {
return mappingResolver.getUsedSupportedMappings();
}
public Set<Field> getUsedSupportedFields() {
return mappingResolver.getUsedSupportedFields();
}
/**
* @param sourceType from which an automatic sub-mapping needs to be generated
* @param targetType to which an automatic sub-mapping needs to be generated
*
* @return {@code true} if MapStruct is allowed to try and generate an automatic sub-mapping between the
* source and target {@link Type}
*/
public boolean canGenerateAutoSubMappingBetween(Type sourceType, Type targetType) {
return canGenerateAutoSubMappingFor( sourceType ) && canGenerateAutoSubMappingFor( targetType );
}
/**
* @param type that MapStruct wants to use to generate an automatic sub-mapping for/from
*
* @return {@code true} if the type is not excluded from the {@link MappingExclusionProvider}
*/
private boolean canGenerateAutoSubMappingFor(Type type) {
return type.getTypeElement() != null && !SUB_MAPPING_EXCLUSION_PROVIDER.isExcluded( type.getTypeElement() );
}
public boolean isErroneous() {
return messager.isErroneous();
}
}
|
MappingResolver
|
java
|
quarkusio__quarkus
|
integration-tests/oidc/src/test/java/io/quarkus/it/keycloak/BearerTokenAuthorizationInGraalITCase.java
|
{
"start": 265,
"end": 510
}
|
class ____ extends BearerTokenAuthorizationTest {
DevServicesContext context;
@Test
public void testDevServicesProperties() {
assertFalse(context.devServicesProperties().isEmpty());
}
}
|
BearerTokenAuthorizationInGraalITCase
|
java
|
apache__camel
|
dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java
|
{
"start": 401013,
"end": 406976
}
|
class ____ extends YamlDeserializerBase<GetDefinition> {
public GetDefinitionDeserializer() {
super(GetDefinition.class);
}
@Override
protected GetDefinition newInstance() {
return new GetDefinition();
}
@Override
protected boolean setProperty(GetDefinition target, String propertyKey, String propertyName,
Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "apiDocs": {
String val = asText(node);
target.setApiDocs(val);
break;
}
case "bindingMode": {
String val = asText(node);
target.setBindingMode(val);
break;
}
case "clientRequestValidation": {
String val = asText(node);
target.setClientRequestValidation(val);
break;
}
case "clientResponseValidation": {
String val = asText(node);
target.setClientResponseValidation(val);
break;
}
case "consumes": {
String val = asText(node);
target.setConsumes(val);
break;
}
case "deprecated": {
String val = asText(node);
target.setDeprecated(val);
break;
}
case "disabled": {
String val = asText(node);
target.setDisabled(val);
break;
}
case "enableCORS": {
String val = asText(node);
target.setEnableCORS(val);
break;
}
case "enableNoContentResponse": {
String val = asText(node);
target.setEnableNoContentResponse(val);
break;
}
case "outType": {
String val = asText(node);
target.setOutType(val);
break;
}
case "param": {
java.util.List<org.apache.camel.model.rest.ParamDefinition> val = asFlatList(node, org.apache.camel.model.rest.ParamDefinition.class);
target.setParams(val);
break;
}
case "path": {
String val = asText(node);
target.setPath(val);
break;
}
case "produces": {
String val = asText(node);
target.setProduces(val);
break;
}
case "responseMessage": {
java.util.List<org.apache.camel.model.rest.ResponseMessageDefinition> val = asFlatList(node, org.apache.camel.model.rest.ResponseMessageDefinition.class);
target.setResponseMsgs(val);
break;
}
case "routeId": {
String val = asText(node);
target.setRouteId(val);
break;
}
case "security": {
java.util.List<org.apache.camel.model.rest.SecurityDefinition> val = asFlatList(node, org.apache.camel.model.rest.SecurityDefinition.class);
target.setSecurity(val);
break;
}
case "skipBindingOnErrorCode": {
String val = asText(node);
target.setSkipBindingOnErrorCode(val);
break;
}
case "streamCache": {
String val = asText(node);
target.setStreamCache(val);
break;
}
case "to": {
org.apache.camel.model.ToDefinition val = asType(node, org.apache.camel.model.ToDefinition.class);
target.setTo(val);
break;
}
case "type": {
String val = asText(node);
target.setType(val);
break;
}
case "id": {
String val = asText(node);
target.setId(val);
break;
}
case "description": {
String val = asText(node);
target.setDescription(val);
break;
}
case "note": {
String val = asText(node);
target.setNote(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "globalOption",
types = org.apache.camel.model.GlobalOptionDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "Global Option",
description = "Models a string key/value pair for configuring some global options on a Camel context such as max debug log length.",
deprecated = false,
properties = {
@YamlProperty(name = "key", type = "string", required = true, description = "Global option key", displayName = "Key"),
@YamlProperty(name = "value", type = "string", required = true, description = "Global option value", displayName = "Value")
}
)
public static
|
GetDefinitionDeserializer
|
java
|
google__guava
|
android/guava/src/com/google/common/collect/TreeMultiset.java
|
{
"start": 18111,
"end": 18478
}
|
class ____<T> {
private @Nullable T value;
@Nullable T get() {
return value;
}
void checkAndSet(@Nullable T expected, @Nullable T newValue) {
if (value != expected) {
throw new ConcurrentModificationException();
}
value = newValue;
}
void clear() {
value = null;
}
}
private static final
|
Reference
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/OverriadeTest.java
|
{
"start": 274,
"end": 505
}
|
class ____ {
protected long id;
public long getId() {
return id;
}
public void setId(long id) {
throw new UnsupportedOperationException();
}
}
public static
|
A
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/invoker/basic/PrimitiveReturnValueInvokerTest.java
|
{
"start": 2527,
"end": 2715
}
|
class ____ {
public int hello(int i) {
return 1 + i;
}
public static double helloStatic(double d) {
return 1.0 + d;
}
}
}
|
MyService
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/TranslateTimeSeriesAggregate.java
|
{
"start": 20525,
"end": 22731
}
|
class ____ {
final Map<String, Integer> next = new HashMap<>();
String next(String prefix) {
int id = next.merge(prefix, 1, Integer::sum);
return prefix + "_$" + id;
}
}
void checkWindow(TimeSeriesAggregate agg) {
boolean hasWindow = false;
for (NamedExpression aggregate : agg.aggregates()) {
if (Alias.unwrap(aggregate) instanceof AggregateFunction af && af.hasWindow()) {
hasWindow = true;
break;
}
}
if (hasWindow == false) {
return;
}
final long bucketInMillis = getTimeBucketInMillis(agg);
if (bucketInMillis <= 0) {
throw new EsqlIllegalArgumentException(
"Using a window in aggregation [{}] requires a time bucket in groupings",
agg.sourceText()
);
}
for (NamedExpression aggregate : agg.aggregates()) {
if (Alias.unwrap(aggregate) instanceof AggregateFunction af && af.hasWindow()) {
Expression window = af.window();
if (window.foldable() && window.fold(FoldContext.small()) instanceof Duration d) {
final long windowInMills = d.toMillis();
if (windowInMills >= bucketInMillis && windowInMills % bucketInMillis == 0) {
continue;
}
}
throw new EsqlIllegalArgumentException(
"Unsupported window [{}] for aggregate function [{}]; "
+ "the window must be larger than the time bucket [{}] and an exact multiple of it",
window.sourceText(),
af.sourceText(),
Objects.requireNonNull(agg.timeBucket()).sourceText()
);
}
}
}
private long getTimeBucketInMillis(TimeSeriesAggregate agg) {
final Bucket bucket = agg.timeBucket();
if (bucket != null && bucket.buckets().foldable() && bucket.buckets().fold(FoldContext.small()) instanceof Duration d) {
return d.toMillis();
}
return -1L;
}
}
|
InternalNames
|
java
|
apache__dubbo
|
dubbo-common/src/test/java/org/apache/dubbo/common/extension/ExtensionLoader_Adaptive_Test.java
|
{
"start": 8021,
"end": 8472
}
|
interface ____.apache.dubbo.common.extension.ext1.SimpleExt is not adaptive method!"));
}
}
@Test
void test_getAdaptiveExtension_ExceptionWhenNoUrlAttribute() throws Exception {
try {
ExtensionLoader.getExtensionLoader(NoUrlParamExt.class).getAdaptiveExtension();
fail();
} catch (Exception expected) {
assertThat(expected.getMessage(), containsString("Failed to create adaptive
|
org
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
|
{
"start": 22855,
"end": 23386
}
|
class ____ extends AMService implements HSClientProtocol {
public HistoryService() {
super(HSHOSTADDRESS);
this.protocol = HSClientProtocol.class;
}
@Override
public GetCountersResponse getCounters(GetCountersRequest request)
throws IOException {
hsContact = true;
Counters counters = getMyCounters();
GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class);
response.setCounters(counters);
return response;
}
}
|
HistoryService
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/ondeletecascade/OnDeleteJoinedInheritanceTest.java
|
{
"start": 1992,
"end": 2109
}
|
class ____ {
@Id
long id;
boolean a;
}
@Entity(name = "B")
@OnDelete(action = OnDeleteAction.CASCADE)
static
|
A
|
java
|
apache__kafka
|
raft/src/test/java/org/apache/kafka/raft/LeaderStateTest.java
|
{
"start": 2116,
"end": 38470
}
|
class ____ {
private final VoterSet.VoterNode localVoterNode = VoterSetTest.voterNode(ReplicaKey.of(0, Uuid.randomUuid()));
private final int epoch = 5;
private final LogContext logContext = new LogContext();
private final MockTime time = new MockTime();
private final int fetchTimeoutMs = 2000;
private final int checkQuorumTimeoutMs = (int) (fetchTimeoutMs * CHECK_QUORUM_TIMEOUT_FACTOR);
private final int beginQuorumEpochTimeoutMs = fetchTimeoutMs / 2;
private LeaderState<?> newLeaderState(
VoterSet voters,
long epochStartOffset,
KRaftVersion kraftVersion
) {
return newLeaderState(
voters,
epochStartOffset,
kraftVersion,
Mockito.mock(BatchAccumulator.class)
);
}
private LeaderState<?> newLeaderState(
VoterSet voters,
long epochStartOffset,
KRaftVersion kraftVersion,
BatchAccumulator<?> accumulator
) {
return new LeaderState<>(
time,
localVoterNode,
epoch,
epochStartOffset,
voters,
OptionalLong.of(0L),
kraftVersion,
voters.voterIds(),
accumulator,
fetchTimeoutMs,
logContext,
new KafkaRaftMetrics(new Metrics(), "raft")
);
}
private VoterSet localWithRemoteVoterSet(IntStream remoteIds, boolean withDirectoryId) {
Map<Integer, VoterSet.VoterNode> voters = VoterSetTest.voterMap(remoteIds, withDirectoryId);
if (withDirectoryId) {
voters.put(localVoterNode.voterKey().id(), localVoterNode);
} else {
voters.put(
localVoterNode.voterKey().id(),
VoterSetTest.voterNode(
ReplicaKey.of(localVoterNode.voterKey().id(), ReplicaKey.NO_DIRECTORY_ID)
)
);
}
return VoterSetTest.voterSet(voters);
}
private VoterSet localWithRemoteVoterSet(Stream<ReplicaKey> remoteReplicaKeys, boolean withDirectoryId) {
ReplicaKey actualLocalVoter = withDirectoryId ?
localVoterNode.voterKey() :
ReplicaKey.of(localVoterNode.voterKey().id(), ReplicaKey.NO_DIRECTORY_ID);
return VoterSetTest.voterSet(
Stream.concat(Stream.of(actualLocalVoter), remoteReplicaKeys)
);
}
@Test
public void testRequireNonNullAccumulator() {
VoterSet voterSet = VoterSetTest.voterSet(Stream.of(localVoterNode.voterKey()));
assertThrows(
NullPointerException.class,
() -> new LeaderState<>(
new MockTime(),
voterSet.voterNodes()
.stream()
.filter(node -> node.voterKey().equals(localVoterNode.voterKey()))
.findFirst()
.get(),
epoch,
0,
voterSet,
OptionalLong.of(0),
KRaftVersion.KRAFT_VERSION_1,
Set.of(),
null,
fetchTimeoutMs,
logContext,
new KafkaRaftMetrics(new Metrics(), "raft")
)
);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testFollowerAcknowledgement(boolean withDirectoryId) {
ReplicaKey node1 = replicaKey(1, withDirectoryId);
ReplicaKey node2 = replicaKey(2, withDirectoryId);
LeaderState<?> state = newLeaderState(
localWithRemoteVoterSet(Stream.of(node1, node2), withDirectoryId),
0L,
KRaftVersion.KRAFT_VERSION_1
);
assertEquals(Set.of(node1, node2), state.nonAcknowledgingVoters());
state.addAcknowledgementFrom(node1.id());
assertEquals(Set.of(node2), state.nonAcknowledgingVoters());
state.addAcknowledgementFrom(node2.id());
assertEquals(Set.of(), state.nonAcknowledgingVoters());
}
@Test
public void testNonFollowerAcknowledgement() {
int nonVoterId = 1;
LeaderState<?> state = newLeaderState(
VoterSetTest.voterSet(Stream.of(localVoterNode.voterKey())),
0L,
KRaftVersion.KRAFT_VERSION_1
);
assertThrows(IllegalArgumentException.class, () -> state.addAcknowledgementFrom(nonVoterId));
}
@Test
public void testUpdateHighWatermarkQuorumSizeOne() {
VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterNode.voterKey()));
LeaderState<?> state = newLeaderState(
voters,
15L,
KRaftVersion.KRAFT_VERSION_1
);
assertEquals(Optional.empty(), state.highWatermark());
assertFalse(state.updateLocalState(new LogOffsetMetadata(15L), voters));
assertEquals(Set.of(), state.nonAcknowledgingVoters());
assertEquals(Optional.empty(), state.highWatermark());
assertTrue(state.updateLocalState(new LogOffsetMetadata(16L), voters));
assertEquals(Optional.of(new LogOffsetMetadata(16L)), state.highWatermark());
assertTrue(state.updateLocalState(new LogOffsetMetadata(20), voters));
assertEquals(Optional.of(new LogOffsetMetadata(20L)), state.highWatermark());
}
@Test
public void testNonMonotonicLocalEndOffsetUpdate() {
VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterNode.voterKey()));
LeaderState<?> state = newLeaderState(
voters,
15L,
KRaftVersion.KRAFT_VERSION_1
);
assertEquals(Optional.empty(), state.highWatermark());
assertTrue(state.updateLocalState(new LogOffsetMetadata(16L), voters));
assertEquals(Optional.of(new LogOffsetMetadata(16L)), state.highWatermark());
assertThrows(
IllegalStateException.class,
() -> state.updateLocalState(new LogOffsetMetadata(15L), voters)
);
}
@Test
public void testIdempotentEndOffsetUpdate() {
VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterNode.voterKey()));
LeaderState<?> state = newLeaderState(
voters,
15L,
KRaftVersion.KRAFT_VERSION_1
);
assertEquals(Optional.empty(), state.highWatermark());
assertTrue(state.updateLocalState(new LogOffsetMetadata(16L), voters));
assertFalse(state.updateLocalState(new LogOffsetMetadata(16L), voters));
assertEquals(Optional.of(new LogOffsetMetadata(16L)), state.highWatermark());
}
@Test
public void testUpdateHighWatermarkMetadata() {
VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterNode.voterKey()));
LeaderState<?> state = newLeaderState(
voters,
15L,
KRaftVersion.KRAFT_VERSION_1
);
assertEquals(Optional.empty(), state.highWatermark());
LogOffsetMetadata initialHw = new LogOffsetMetadata(16L, Optional.of(new MockOffsetMetadata("bar")));
assertTrue(state.updateLocalState(initialHw, voters));
assertEquals(Optional.of(initialHw), state.highWatermark());
LogOffsetMetadata updateHw = new LogOffsetMetadata(16L, Optional.of(new MockOffsetMetadata("baz")));
assertTrue(state.updateLocalState(updateHw, voters));
assertEquals(Optional.of(updateHw), state.highWatermark());
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testUpdateHighWatermarkQuorumSizeTwo(boolean withDirectoryId) {
ReplicaKey otherNodeKey = replicaKey(1, withDirectoryId);
VoterSet voters = localWithRemoteVoterSet(Stream.of(otherNodeKey), withDirectoryId);
LeaderState<?> state = newLeaderState(
voters,
10L,
KRaftVersion.KRAFT_VERSION_1
);
assertFalse(state.updateLocalState(new LogOffsetMetadata(13L), voters));
assertEquals(Set.of(otherNodeKey), state.nonAcknowledgingVoters());
assertEquals(Optional.empty(), state.highWatermark());
assertFalse(state.updateReplicaState(otherNodeKey, 0, new LogOffsetMetadata(10L)));
assertEquals(Set.of(), state.nonAcknowledgingVoters());
assertEquals(Optional.empty(), state.highWatermark());
assertTrue(state.updateReplicaState(otherNodeKey, 0, new LogOffsetMetadata(11L)));
assertEquals(Optional.of(new LogOffsetMetadata(11L)), state.highWatermark());
assertTrue(state.updateReplicaState(otherNodeKey, 0, new LogOffsetMetadata(13L)));
assertEquals(Optional.of(new LogOffsetMetadata(13L)), state.highWatermark());
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testUpdateHighWatermarkQuorumSizeThree(boolean withDirectoryId) {
ReplicaKey nodeKey1 = replicaKey(1, withDirectoryId);
ReplicaKey nodeKey2 = replicaKey(2, withDirectoryId);
VoterSet voters = localWithRemoteVoterSet(Stream.of(nodeKey1, nodeKey2), withDirectoryId);
LeaderState<?> state = newLeaderState(
voters,
10L,
KRaftVersion.KRAFT_VERSION_1
);
assertFalse(state.updateLocalState(new LogOffsetMetadata(15L), voters));
assertEquals(Set.of(nodeKey1, nodeKey2), state.nonAcknowledgingVoters());
assertEquals(Optional.empty(), state.highWatermark());
assertFalse(state.updateReplicaState(nodeKey1, 0, new LogOffsetMetadata(10L)));
assertEquals(Set.of(nodeKey2), state.nonAcknowledgingVoters());
assertEquals(Optional.empty(), state.highWatermark());
assertFalse(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(10L)));
assertEquals(Set.of(), state.nonAcknowledgingVoters());
assertEquals(Optional.empty(), state.highWatermark());
assertTrue(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(15L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
assertFalse(state.updateLocalState(new LogOffsetMetadata(20L), voters));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
assertTrue(state.updateReplicaState(nodeKey1, 0, new LogOffsetMetadata(20L)));
assertEquals(Optional.of(new LogOffsetMetadata(20L)), state.highWatermark());
assertFalse(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(20L)));
assertEquals(Optional.of(new LogOffsetMetadata(20L)), state.highWatermark());
}
@Test
public void testHighWatermarkDoesIncreaseFromNewVoter() {
ReplicaKey nodeKey1 = ReplicaKey.of(1, Uuid.randomUuid());
ReplicaKey nodeKey2 = ReplicaKey.of(2, Uuid.randomUuid());
VoterSet originalVoters = localWithRemoteVoterSet(Stream.of(nodeKey1), true);
LeaderState<?> state = newLeaderState(
originalVoters,
5L,
KRaftVersion.KRAFT_VERSION_1
);
assertFalse(state.updateLocalState(new LogOffsetMetadata(15L), originalVoters));
assertTrue(state.updateReplicaState(nodeKey1, 0, new LogOffsetMetadata(10L)));
assertEquals(Optional.of(new LogOffsetMetadata(10L)), state.highWatermark());
// updating replica state of node2 before it joins voterSet should not increase HW to 15L
assertFalse(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(15L)));
assertEquals(Optional.of(new LogOffsetMetadata(10L)), state.highWatermark());
// adding node2 to voterSet will cause HW to increase to 15L
VoterSet votersWithNode2 = originalVoters.addVoter(VoterSetTest.voterNode(nodeKey2)).get();
assertTrue(state.updateLocalState(new LogOffsetMetadata(15L), votersWithNode2));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
// HW will not update to 16L until a majority reaches it
assertFalse(state.updateLocalState(new LogOffsetMetadata(16L), votersWithNode2));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
assertTrue(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(16L)));
assertEquals(Optional.of(new LogOffsetMetadata(16L)), state.highWatermark());
}
@Test
public void testHighWatermarkDoesNotDecreaseFromNewVoter() {
ReplicaKey nodeKey1 = ReplicaKey.of(1, Uuid.randomUuid());
ReplicaKey nodeKey2 = ReplicaKey.of(2, Uuid.randomUuid());
ReplicaKey nodeKey3 = ReplicaKey.of(3, Uuid.randomUuid());
// start with three voters with HW at 15L
VoterSet originalVoters = localWithRemoteVoterSet(Stream.of(nodeKey1, nodeKey2), true);
LeaderState<?> state = newLeaderState(
originalVoters,
5L,
KRaftVersion.KRAFT_VERSION_1
);
assertFalse(state.updateLocalState(new LogOffsetMetadata(15L), originalVoters));
assertTrue(state.updateReplicaState(nodeKey1, 0, new LogOffsetMetadata(15L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
assertFalse(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(10L)));
// updating replica state of node3 before it joins voterSet
assertFalse(state.updateReplicaState(nodeKey3, 0, new LogOffsetMetadata(10L)));
// adding node3 to voterSet should not cause HW to decrease even if majority is < HW
VoterSet votersWithNode3 = originalVoters.addVoter(VoterSetTest.voterNode(nodeKey3)).get();
assertFalse(state.updateLocalState(new LogOffsetMetadata(16L), votersWithNode3));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
// HW will not decrease if calculated HW is anything lower than the last HW
assertFalse(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(13L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
assertFalse(state.updateReplicaState(nodeKey3, 0, new LogOffsetMetadata(13L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
assertFalse(state.updateReplicaState(nodeKey1, 0, new LogOffsetMetadata(16L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
// HW will update to 16L once a majority of the voterSet is at least 16L
assertTrue(state.updateReplicaState(nodeKey3, 0, new LogOffsetMetadata(16L)));
assertEquals(Optional.of(new LogOffsetMetadata(16L)), state.highWatermark());
}
@Test
public void testUpdateHighWatermarkRemovingFollowerFromVoterStates() {
ReplicaKey nodeKey1 = ReplicaKey.of(1, Uuid.randomUuid());
ReplicaKey nodeKey2 = ReplicaKey.of(2, Uuid.randomUuid());
VoterSet originalVoters = localWithRemoteVoterSet(Stream.of(nodeKey1, nodeKey2), true);
LeaderState<?> state = newLeaderState(
originalVoters,
10L,
KRaftVersion.KRAFT_VERSION_1
);
assertFalse(state.updateLocalState(new LogOffsetMetadata(15L), originalVoters));
assertTrue(state.updateReplicaState(nodeKey1, 0, new LogOffsetMetadata(15L)));
assertFalse(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(10L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
// removing node1 should not decrement HW to 10L
VoterSet votersWithoutNode1 = originalVoters.removeVoter(nodeKey1).get();
assertFalse(state.updateLocalState(new LogOffsetMetadata(17L), votersWithoutNode1));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
// HW cannot change until after node2 catches up to last HW
assertFalse(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(14L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
assertFalse(state.updateLocalState(new LogOffsetMetadata(18L), votersWithoutNode1));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
assertFalse(state.updateReplicaState(nodeKey1, 0, new LogOffsetMetadata(18L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
assertFalse(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(15L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
// HW should update to 16L
assertTrue(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(16L)));
assertEquals(Optional.of(new LogOffsetMetadata(16L)), state.highWatermark());
}
@Test
public void testUpdateHighWatermarkQuorumRemovingLeaderFromVoterStates() {
ReplicaKey nodeKey1 = ReplicaKey.of(1, Uuid.randomUuid());
ReplicaKey nodeKey2 = ReplicaKey.of(2, Uuid.randomUuid());
VoterSet originalVoters = localWithRemoteVoterSet(Stream.of(nodeKey1, nodeKey2), true);
LeaderState<?> state = newLeaderState(
originalVoters,
10L,
KRaftVersion.KRAFT_VERSION_1
);
assertFalse(state.updateLocalState(new LogOffsetMetadata(15L), originalVoters));
assertTrue(state.updateReplicaState(nodeKey1, 0, new LogOffsetMetadata(15L)));
assertFalse(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(10L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
// removing leader should not decrement HW to 10L
VoterSet votersWithoutLeader = originalVoters.removeVoter(localVoterNode.voterKey()).get();
assertFalse(state.updateLocalState(new LogOffsetMetadata(17L), votersWithoutLeader));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
// HW cannot change until node2 catches up to last HW
assertFalse(state.updateReplicaState(nodeKey1, 0, new LogOffsetMetadata(16L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
assertFalse(state.updateLocalState(new LogOffsetMetadata(18L), votersWithoutLeader));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
assertFalse(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(14L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
assertFalse(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(15L)));
assertEquals(Optional.of(new LogOffsetMetadata(15L)), state.highWatermark());
// HW will not update to 16L until the majority of remaining voterSet (node1, node2) are at least 16L
assertTrue(state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(16L)));
assertEquals(Optional.of(new LogOffsetMetadata(16L)), state.highWatermark());
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testNonMonotonicHighWatermarkUpdate(boolean withDirectoryId) {
MockTime time = new MockTime();
ReplicaKey nodeKey1 = replicaKey(1, withDirectoryId);
VoterSet voters = localWithRemoteVoterSet(Stream.of(nodeKey1), withDirectoryId);
LeaderState<?> state = newLeaderState(
voters,
0L,
KRaftVersion.KRAFT_VERSION_1
);
state.updateLocalState(new LogOffsetMetadata(10L), voters);
state.updateReplicaState(nodeKey1, time.milliseconds(), new LogOffsetMetadata(10L));
assertEquals(Optional.of(new LogOffsetMetadata(10L)), state.highWatermark());
// Follower crashes and disk is lost. It fetches an earlier offset to rebuild state.
// The leader will report an error in the logs, but will not let the high watermark rewind
assertFalse(state.updateReplicaState(nodeKey1, time.milliseconds(), new LogOffsetMetadata(5L)));
assertEquals(Optional.of(new LogOffsetMetadata(10L)), state.highWatermark());
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testGetNonLeaderFollowersByFetchOffsetDescending(boolean withDirectoryId) {
ReplicaKey nodeKey1 = replicaKey(1, withDirectoryId);
ReplicaKey nodeKey2 = replicaKey(2, withDirectoryId);
long leaderStartOffset = 10L;
long leaderEndOffset = 15L;
VoterSet voters = localWithRemoteVoterSet(Stream.of(nodeKey1, nodeKey2), withDirectoryId);
LeaderState<?> state = newLeaderState(
voters,
leaderStartOffset,
KRaftVersion.KRAFT_VERSION_1
);
state.updateLocalState(new LogOffsetMetadata(leaderEndOffset), voters);
assertEquals(Optional.empty(), state.highWatermark());
state.updateReplicaState(nodeKey1, 0, new LogOffsetMetadata(leaderStartOffset));
state.updateReplicaState(nodeKey2, 0, new LogOffsetMetadata(leaderEndOffset));
// Leader should not be included; the follower with larger offset should be prioritized.
assertEquals(
List.of(nodeKey2, nodeKey1),
state.nonLeaderVotersByDescendingFetchOffset()
);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testCheckQuorum(boolean withDirectoryId) {
ReplicaKey nodeKey1 = replicaKey(1, withDirectoryId);
ReplicaKey nodeKey2 = replicaKey(2, withDirectoryId);
ReplicaKey nodeKey3 = replicaKey(3, withDirectoryId);
ReplicaKey nodeKey4 = replicaKey(4, withDirectoryId);
ReplicaKey observerKey5 = replicaKey(5, withDirectoryId);
VoterSet voters = localWithRemoteVoterSet(
Stream.of(nodeKey1, nodeKey2, nodeKey3, nodeKey4),
withDirectoryId
);
LeaderState<?> state = newLeaderState(
voters,
0L,
KRaftVersion.KRAFT_VERSION_1
);
assertEquals(checkQuorumTimeoutMs, state.timeUntilCheckQuorumExpires(time.milliseconds()));
int resignLeadershipTimeout = checkQuorumTimeoutMs;
// checkQuorum timeout not exceeded, should not expire the timer
time.sleep(resignLeadershipTimeout / 2);
assertTrue(state.timeUntilCheckQuorumExpires(time.milliseconds()) > 0);
// received fetch requests from 2 voter nodes, the timer should be reset
state.updateCheckQuorumForFollowingVoter(nodeKey1, time.milliseconds());
state.updateCheckQuorumForFollowingVoter(nodeKey2, time.milliseconds());
assertEquals(checkQuorumTimeoutMs, state.timeUntilCheckQuorumExpires(time.milliseconds()));
// Since the timer was reset, it won't expire this time.
time.sleep(resignLeadershipTimeout / 2);
long remainingMs = state.timeUntilCheckQuorumExpires(time.milliseconds());
assertTrue(remainingMs > 0);
// received fetch requests from 1 voter and 1 observer nodes, the timer should not be reset.
state.updateCheckQuorumForFollowingVoter(nodeKey3, time.milliseconds());
state.updateCheckQuorumForFollowingVoter(observerKey5, time.milliseconds());
assertEquals(remainingMs, state.timeUntilCheckQuorumExpires(time.milliseconds()));
// This time, the checkQuorum timer will be expired
time.sleep(resignLeadershipTimeout / 2);
assertEquals(0, state.timeUntilCheckQuorumExpires(time.milliseconds()));
}
@Test
public void testCheckQuorumAfterVoterSetChanges() {
ReplicaKey nodeKey1 = ReplicaKey.of(1, Uuid.randomUuid());
ReplicaKey nodeKey2 = ReplicaKey.of(2, Uuid.randomUuid());
ReplicaKey nodeKey3 = ReplicaKey.of(3, Uuid.randomUuid());
VoterSet originalVoters = localWithRemoteVoterSet(Stream.of(nodeKey1, nodeKey2), true);
LeaderState<?> state = newLeaderState(
originalVoters,
0L,
KRaftVersion.KRAFT_VERSION_1
);
assertEquals(checkQuorumTimeoutMs, state.timeUntilCheckQuorumExpires(time.milliseconds()));
// checkQuorum timeout not exceeded, should not expire the timer
time.sleep(checkQuorumTimeoutMs / 2);
assertEquals(checkQuorumTimeoutMs / 2, state.timeUntilCheckQuorumExpires(time.milliseconds()));
// received fetch request from 1 voter node, the timer should be reset
state.updateCheckQuorumForFollowingVoter(nodeKey1, time.milliseconds());
assertEquals(checkQuorumTimeoutMs, state.timeUntilCheckQuorumExpires(time.milliseconds()));
// Adding 1 new voter to the voter set
VoterSet votersWithNode3 = originalVoters.addVoter(VoterSetTest.voterNode(nodeKey3)).get();
state.updateLocalState(new LogOffsetMetadata(1L), votersWithNode3);
time.sleep(checkQuorumTimeoutMs / 2);
// received fetch request from 1 voter node, the timer should not be reset because the majority should be 3
state.updateCheckQuorumForFollowingVoter(nodeKey1, time.milliseconds());
assertEquals(checkQuorumTimeoutMs / 2, state.timeUntilCheckQuorumExpires(time.milliseconds()));
// Timer should be reset after receiving another voter's fetch request
state.updateCheckQuorumForFollowingVoter(nodeKey2, time.milliseconds());
assertEquals(checkQuorumTimeoutMs, state.timeUntilCheckQuorumExpires(time.milliseconds()));
// removing leader from the voter set
VoterSet votersWithoutLeader = votersWithNode3.removeVoter(localVoterNode.voterKey()).get();
state.updateLocalState(new LogOffsetMetadata(1L), votersWithoutLeader);
time.sleep(checkQuorumTimeoutMs / 2);
// received fetch request from 1 voter, the timer should not be reset.
state.updateCheckQuorumForFollowingVoter(nodeKey2, time.milliseconds());
assertEquals(checkQuorumTimeoutMs / 2, state.timeUntilCheckQuorumExpires(time.milliseconds()));
// received fetch request from another voter, the timer should be reset since the current quorum majority is 2.
state.updateCheckQuorumForFollowingVoter(nodeKey1, time.milliseconds());
assertEquals(checkQuorumTimeoutMs, state.timeUntilCheckQuorumExpires(time.milliseconds()));
}
@Test
public void testCheckQuorumWithOneVoter() {
int observer = 1;
// Only 1 voter quorum
LeaderState<?> state = newLeaderState(
VoterSetTest.voterSet(Stream.of(localVoterNode.voterKey())),
0L,
KRaftVersion.KRAFT_VERSION_1
);
assertEquals(Long.MAX_VALUE, state.timeUntilCheckQuorumExpires(time.milliseconds()));
// When checkQuorum timeout not exceeded and got no fetch request from voter, it should not expire the timer
time.sleep(checkQuorumTimeoutMs);
assertEquals(Long.MAX_VALUE, state.timeUntilCheckQuorumExpires(time.milliseconds()));
// received fetch requests from 1 observer node, the timer still return Long.MAX_VALUE.
state.updateCheckQuorumForFollowingVoter(
ReplicaKey.of(observer, ReplicaKey.NO_DIRECTORY_ID),
time.milliseconds()
);
assertEquals(Long.MAX_VALUE, state.timeUntilCheckQuorumExpires(time.milliseconds()));
}
@Test
public void testLeaderEndpoints() {
VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterNode.voterKey()));
LeaderState<?> state = newLeaderState(
voters,
0L,
KRaftVersion.KRAFT_VERSION_1
);
assertNotEquals(Endpoints.empty(), state.leaderEndpoints());
assertEquals(voters.listeners(localVoterNode.voterKey().id()), state.leaderEndpoints());
}
@Test
public void testUpdateVotersFromNoDirectoryIdToDirectoryId() {
int node1 = 1;
int node2 = 2;
ReplicaKey nodeKey1 = ReplicaKey.of(node1, Uuid.randomUuid());
ReplicaKey nodeKey2 = ReplicaKey.of(node2, Uuid.randomUuid());
VoterSet votersBeforeUpgrade = localWithRemoteVoterSet(
IntStream.of(node1, node2),
false
);
LeaderState<?> state = newLeaderState(
votersBeforeUpgrade,
0L,
KRaftVersion.KRAFT_VERSION_1
);
assertFalse(state.updateLocalState(new LogOffsetMetadata(10L), votersBeforeUpgrade));
assertTrue(state.updateReplicaState(nodeKey1, 0L, new LogOffsetMetadata(10L)));
assertEquals(Optional.of(new LogOffsetMetadata(10L)), state.highWatermark());
VoterSet votersAfterUpgrade = localWithRemoteVoterSet(Stream.of(nodeKey1, nodeKey2), true);
assertFalse(state.updateLocalState(new LogOffsetMetadata(15L), votersAfterUpgrade));
assertTrue(state.updateReplicaState(nodeKey2, 0L, new LogOffsetMetadata(13L)));
assertEquals(Optional.of(new LogOffsetMetadata(13L)), state.highWatermark());
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testGrantVote(boolean isLogUpToDate) {
int[] remoteIds = {1, 2, 3};
LeaderState<?> state = newLeaderState(
VoterSetTest.voterSet(
VoterSetTest.voterMap(
IntStream.concat(IntStream.of(localVoterNode.voterKey().id()), IntStream.of(remoteIds)),
false
)
),
1,
KRaftVersion.KRAFT_VERSION_1
);
IntStream.of(remoteIds).forEach(id ->
List.of(true, false).forEach(isPrevote ->
assertFalse(
state.canGrantVote(
ReplicaKey.of(id, ReplicaKey.NO_DIRECTORY_ID),
isLogUpToDate,
isPrevote
)
)
)
);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testBeginQuorumEpochTimer(boolean withDirectoryId) {
int follower1 = 1;
long epochStartOffset = 10L;
VoterSet voters = localWithRemoteVoterSet(IntStream.of(follower1), withDirectoryId);
LeaderState<?> state = newLeaderState(
voters,
epochStartOffset,
KRaftVersion.KRAFT_VERSION_1
);
assertEquals(0, state.timeUntilBeginQuorumEpochTimerExpires(time.milliseconds()));
time.sleep(5);
state.resetBeginQuorumEpochTimer(time.milliseconds());
assertEquals(beginQuorumEpochTimeoutMs, state.timeUntilBeginQuorumEpochTimerExpires(time.milliseconds()));
time.sleep(5);
assertEquals(beginQuorumEpochTimeoutMs - 5, state.timeUntilBeginQuorumEpochTimerExpires(time.milliseconds()));
time.sleep(beginQuorumEpochTimeoutMs);
assertEquals(0, state.timeUntilBeginQuorumEpochTimerExpires(time.milliseconds()));
}
@Test
public void testVolatileVoters() {
int follower1 = 1;
long epochStartOffset = 10L;
VoterSet voters = localWithRemoteVoterSet(IntStream.of(follower1), false);
LeaderState<?> state = newLeaderState(
voters,
epochStartOffset,
KRaftVersion.KRAFT_VERSION_0
);
var votersWithLeaderUpdated = state.volatileVoters().get();
assertEquals(
voters.updateVoterIgnoringDirectoryId(localVoterNode).get(),
votersWithLeaderUpdated.voters()
);
var updatedVoters = new KRaftVersionUpgrade.Voters(
votersWithLeaderUpdated
.voters()
.updateVoterIgnoringDirectoryId(VoterSetTest.voterNode(follower1, true))
.get()
);
// Upate in-memory voter and check state
assertTrue(
state.compareAndSetVolatileVoters(votersWithLeaderUpdated, updatedVoters)
);
assertEquals(updatedVoters, state.volatileVoters().get());
// Unable to perform atomic update
assertFalse(
state.compareAndSetVolatileVoters(votersWithLeaderUpdated, updatedVoters)
);
}
@Test
public void testInvalidMaybeAppendUpgradedKRaftVersion() {
int follower1 = 1;
int follower2 = 2;
long epochStartOffset = 10L;
VoterSet persistedVoters = localWithRemoteVoterSet(IntStream.of(follower1, follower2), false);
LeaderState<?> state = newLeaderState(
persistedVoters,
epochStartOffset,
KRaftVersion.KRAFT_VERSION_0
);
// none of the remove voters support kraft version 1 since the starting version is 0.
assertThrows(
InvalidUpdateVersionException.class,
() ->
state.maybeAppendUpgradedKRaftVersion(
epoch,
KRaftVersion.KRAFT_VERSION_1,
KRaftVersion.KRAFT_VERSION_0,
persistedVoters,
false,
time.milliseconds()
)
);
// epoch is less than the leader's epoch
assertThrows(
NotLeaderException.class,
() ->
state.maybeAppendUpgradedKRaftVersion(
epoch - 1,
KRaftVersion.KRAFT_VERSION_1,
KRaftVersion.KRAFT_VERSION_0,
persistedVoters,
false,
time.milliseconds()
)
);
// epoch is greater than the leader's epoch
assertThrows(
IllegalArgumentException.class,
() ->
state.maybeAppendUpgradedKRaftVersion(
epoch + 1,
KRaftVersion.KRAFT_VERSION_1,
KRaftVersion.KRAFT_VERSION_0,
persistedVoters,
false,
time.milliseconds()
)
);
// noop since the upgrade version is already 1
assertFalse(
state.maybeAppendUpgradedKRaftVersion(
epoch,
KRaftVersion.KRAFT_VERSION_1,
KRaftVersion.KRAFT_VERSION_1,
persistedVoters,
false,
time.milliseconds()
)
);
}
@Test
public void testMaybeAppendUpgradedKRaftVersion() {
int follower1 = 1;
int follower2 = 2;
long epochStartOffset = 10L;
BatchAccumulator<?> accumulator = Mockito.mock(BatchAccumulator.class);
VoterSet persistedVoters = localWithRemoteVoterSet(IntStream.of(follower1, follower2), false);
LeaderState<?> state = newLeaderState(
persistedVoters,
epochStartOffset,
KRaftVersion.KRAFT_VERSION_0,
accumulator
);
var updatedVoters = state.volatileVoters().get().voters();
updatedVoters = updatedVoters
.updateVoterIgnoringDirectoryId(VoterSetTest.voterNode(follower1, true))
.get();
updatedVoters = updatedVoters
.updateVoterIgnoringDirectoryId(VoterSetTest.voterNode(follower2, true))
.get();
state.compareAndSetVolatileVoters(
state.volatileVoters().get(),
new KRaftVersionUpgrade.Voters(updatedVoters)
);
assertTrue(
state.maybeAppendUpgradedKRaftVersion(
epoch,
KRaftVersion.KRAFT_VERSION_1,
KRaftVersion.KRAFT_VERSION_0,
persistedVoters,
false,
time.milliseconds()
)
);
// Expect control records after upgrading the kraft version.
Mockito.verify(accumulator).appendControlMessages(Mockito.any());
// maybe upgrade kraft version should be a noop after an upgrade
assertFalse(
state.maybeAppendUpgradedKRaftVersion(
epoch,
KRaftVersion.KRAFT_VERSION_1,
KRaftVersion.KRAFT_VERSION_0,
persistedVoters,
false,
time.milliseconds()
)
);
}
private record MockOffsetMetadata(String value) implements OffsetMetadata {
}
private ReplicaKey replicaKey(int id, boolean withDirectoryId) {
Uuid directoryId = withDirectoryId ? Uuid.randomUuid() : ReplicaKey.NO_DIRECTORY_ID;
return ReplicaKey.of(id, directoryId);
}
}
|
LeaderStateTest
|
java
|
spring-projects__spring-framework
|
spring-beans/src/main/java/org/springframework/beans/BeanUtilsRuntimeHints.java
|
{
"start": 1158,
"end": 1625
}
|
class ____ implements RuntimeHintsRegistrar {
@Override
public void registerHints(RuntimeHints hints, @Nullable ClassLoader classLoader) {
ReflectionHints reflectionHints = hints.reflection();
reflectionHints.registerType(ResourceEditor.class, MemberCategory.INVOKE_DECLARED_CONSTRUCTORS);
reflectionHints.registerTypeIfPresent(classLoader, "org.springframework.http.MediaTypeEditor",
MemberCategory.INVOKE_DECLARED_CONSTRUCTORS);
}
}
|
BeanUtilsRuntimeHints
|
java
|
playframework__playframework
|
core/play-streams/src/main/java/play/libs/streams/Accumulator.java
|
{
"start": 1784,
"end": 8609
}
|
class ____<E, A> {
private Accumulator() {}
/**
* Map the accumulated value.
*
* @param <B> the mapped value type
* @param f The function to perform the map with.
* @param executor The executor to run the function in.
* @return A new accumulator with the mapped value.
*/
public abstract <B> Accumulator<E, B> map(Function<? super A, ? extends B> f, Executor executor);
/**
* Map the accumulated value with a function that returns a future.
*
* @param <B> the mapped value type
* @param f The function to perform the map with.
* @param executor The executor to run the function in.
* @return A new accumulator with the mapped value.
*/
  public abstract <B> Accumulator<E, B> mapFuture(
      Function<? super A, ? extends CompletionStage<B>> f, Executor executor);
  /**
   * Recover from any errors encountered by the accumulator, replacing a failure with the
   * value returned by the given (synchronous) function.
   *
   * @param f The function to use to recover from errors.
   * @param executor The executor to run the function in.
   * @return A new accumulator that has recovered from errors.
   */
  public abstract Accumulator<E, A> recover(
      Function<? super Throwable, ? extends A> f, Executor executor);
  /**
   * Recover from any errors encountered by the accumulator, replacing a failure with the
   * eventual value of the {@link CompletionStage} returned by the given (asynchronous)
   * function.
   *
   * @param f The function to use to recover from errors.
   * @param executor The executor to run the function in.
   * @return A new accumulator that has recovered from errors.
   */
  public abstract Accumulator<E, A> recoverWith(
      Function<? super Throwable, ? extends CompletionStage<A>> f, Executor executor);
  /**
   * Pass the stream through the given flow before forwarding it to the accumulator.
   *
   * @param <D> the "in" type of the flow parameter.
   * @param flow The flow to send the stream through first.
   * @return A new accumulator with the given flow in its graph.
   */
  public abstract <D> Accumulator<D, A> through(Flow<D, E, ?> flow);
  /**
   * Run the accumulator with an empty source (no elements are fed in).
   *
   * @param mat The flow materializer.
   * @return A future that will be redeemed when the accumulator is done.
   */
  public abstract CompletionStage<A> run(Materializer mat);
  /**
   * Run the accumulator with the given source.
   *
   * @param source The source to feed into the accumulator.
   * @param mat The flow materializer.
   * @return A future that will be redeemed when the accumulator is done.
   */
  public abstract CompletionStage<A> run(Source<E, ?> source, Materializer mat);
  /**
   * Run the accumulator with a single element.
   *
   * @param element The element to feed into the accumulator.
   * @param mat The flow materializer.
   * @return A future that will be redeemed when the accumulator is done.
   */
  public abstract CompletionStage<A> run(E element, Materializer mat);
  /**
   * Convert this accumulator to a sink whose materialized value is the accumulator's result.
   *
   * @return The sink.
   */
  public abstract Sink<E, CompletionStage<A>> toSink();
  /**
   * Convert this accumulator to its Scala counterpart.
   *
   * @return The Scala Accumulator.
   */
  public abstract play.api.libs.streams.Accumulator<E, A> asScala();
/**
* Create an accumulator from an Pekko streams sink.
*
* @param <E> the "in" type of the sink parameter.
* @param <A> the materialized result of the accumulator.
* @param sink The sink.
* @return An accumulator created from the sink.
*/
public static <E, A> Accumulator<E, A> fromSink(Sink<E, CompletionStage<A>> sink) {
return new SinkAccumulator<>(sink);
}
/**
* Create an accumulator that forwards the stream fed into it to the source it produces.
*
* <p>This is useful for when you want to send the consumed stream to another API that takes a
* Source as input.
*
* <p>Extreme care must be taken when using this accumulator - the source *must always* be
* materialized and consumed. If it isn't, this could lead to resource leaks and deadlocks
* upstream.
*
* @return An accumulator that forwards the stream to the produced source.
* @param <E> the "in" type of the parameter.
*/
public static <E> Accumulator<E, Source<E, ?>> source() {
// If Pekko streams ever provides Sink.source(), we should use that instead.
// https://github.com/akka/akka/issues/18406
return new SinkAccumulator<>(
Sink.<E>asPublisher(AsPublisher.WITHOUT_FANOUT)
.mapMaterializedValue(
publisher -> CompletableFuture.completedFuture(Source.fromPublisher(publisher))));
}
/**
* Create a done accumulator with the given value.
*
* @param <E> the "in" type of the parameter.
* @param <A> the materialized result of the accumulator.
* @param a The done value for the accumulator.
* @return The accumulator.
*/
public static <E, A> Accumulator<E, A> done(A a) {
return done(CompletableFuture.completedFuture(a));
}
/**
* Create a done accumulator with the given future.
*
* @param <E> the "in" type of the parameter.
* @param <A> the materialized result of the accumulator.
* @param a A future of the done value.
* @return The accumulator.
*/
public static <E, A> Accumulator<E, A> done(CompletionStage<A> a) {
return new StrictAccumulator<>(e -> a, Sink.<E>cancelled().mapMaterializedValue(notUsed -> a));
}
/**
* Create a done accumulator with the given future.
*
* @param <E> the "in" type of the parameter.
* @param <A> the materialized result of the accumulator.
* @param strictHandler the handler to handle the stream if it can be expressed as a single
* element.
* @param toSink The sink representation of this accumulator, in case the stream can't be
* expressed as a single element.
* @return The accumulator.
*/
public static <E, A> Accumulator<E, A> strict(
Function<Optional<E>, CompletionStage<A>> strictHandler, Sink<E, CompletionStage<A>> toSink) {
return new StrictAccumulator<>(strictHandler, toSink);
}
/**
* Flatten a completion stage of an accumulator to an accumulator.
*
* @param <E> the "in" type of the parameter.
* @param <A> the materialized result of the accumulator.
* @param stage the CompletionStage (asynchronous) accumulator
* @param materializer the stream materializer
* @return The accumulator using the given completion stage
*/
public static <E, A> Accumulator<E, A> flatten(
CompletionStage<Accumulator<E, A>> stage, Materializer materializer) {
final CompletableFuture<A> result = new CompletableFuture<>();
final FlattenSubscriber<A, E> subscriber = new FlattenSubscriber<>(stage, result, materializer);
final Sink<E, CompletionStage<A>> sink =
Sink.fromSubscriber(subscriber).mapMaterializedValue(x -> result);
return new SinkAccumulator<>(sink);
}
private static final
|
Accumulator
|
java
|
apache__flink
|
flink-core-api/src/main/java/org/apache/flink/api/java/tuple/builder/Tuple2Builder.java
|
{
"start": 1267,
"end": 1385
}
|
class ____ {@link Tuple2}.
*
* @param <T0> The type of field 0
* @param <T1> The type of field 1
*/
@Public
public
|
for
|
java
|
apache__camel
|
components/camel-test/camel-test-spring-junit5/src/main/java/org/apache/camel/test/spring/junit5/CamelSpringTestContextLoader.java
|
{
"start": 11368,
"end": 11495
}
|
class ____ test to enable inspection of annotations while the Spring context is being created.
*
* @return the test
|
under
|
java
|
spring-projects__spring-framework
|
spring-beans/src/main/java/org/springframework/beans/factory/config/TypedStringValue.java
|
{
"start": 4292,
"end": 6414
}
|
class ____
* @return the resolved type to convert to
* @throws ClassNotFoundException if the type cannot be resolved
*/
public @Nullable Class<?> resolveTargetType(@Nullable ClassLoader classLoader) throws ClassNotFoundException {
String typeName = getTargetTypeName();
if (typeName == null) {
return null;
}
Class<?> resolvedClass = ClassUtils.forName(typeName, classLoader);
this.targetType = resolvedClass;
return resolvedClass;
}
	/**
	 * Set the configuration source {@code Object} for this metadata element.
	 * <p>The exact type of the object will depend on the configuration mechanism used.
	 */
	public void setSource(@Nullable Object source) {
		this.source = source;
	}
	@Override
	public @Nullable Object getSource() {
		return this.source;
	}
	/**
	 * Set the type name as actually specified for this particular value, if any.
	 */
	public void setSpecifiedTypeName(@Nullable String specifiedTypeName) {
		this.specifiedTypeName = specifiedTypeName;
	}
	/**
	 * Return the type name as actually specified for this particular value, if any.
	 */
	public @Nullable String getSpecifiedTypeName() {
		return this.specifiedTypeName;
	}
	/**
	 * Mark this value as dynamic, i.e. as containing an expression
	 * and hence not being subject to caching.
	 * <p>Note: once set, this flag cannot be cleared through this API.
	 */
	public void setDynamic() {
		this.dynamic = true;
	}
	/**
	 * Return whether this value has been marked as dynamic.
	 */
	public boolean isDynamic() {
		return this.dynamic;
	}
	@Override
	public int compareTo(@Nullable TypedStringValue o) {
		// NOTE(review): relies on getValue() being non-null on both operands; a null
		// value (or a null argument) results in a NullPointerException. Ordering also
		// considers only 'value' while equals() additionally considers 'targetType',
		// so compareTo is not strictly consistent with equals — confirm intended.
		return Comparator.comparing(TypedStringValue::getValue).compare(this, o);
	}
	@Override
	public boolean equals(@Nullable Object other) {
		// Equal when both the raw String value and the resolved target type match.
		return (this == other || (other instanceof TypedStringValue that &&
				ObjectUtils.nullSafeEquals(this.value, that.value) &&
				ObjectUtils.nullSafeEquals(this.targetType, that.targetType)));
	}
	@Override
	public int hashCode() {
		// Must stay aligned with equals(): hash the same two fields.
		return ObjectUtils.nullSafeHash(this.value, this.targetType);
	}
	@Override
	public String toString() {
		return "TypedStringValue: value [" + this.value + "], target type [" + this.targetType + "]";
	}
}
|
name
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/core/style/SimpleValueStyler.java
|
{
"start": 1554,
"end": 2837
}
|
class ____ extends DefaultValueStyler {
/**
* Default {@link Class} styling function: {@link Class#getCanonicalName()}.
*/
public static final Function<Class<?>, String> DEFAULT_CLASS_STYLER = Class::getCanonicalName;
/**
* Default {@link Method} styling function: converts the supplied {@link Method}
* to a simple string representation of the method's signature in the form of
* {@code <method name>(<parameter types>)}, where {@code <parameter types>}
* is a comma-separated list of the {@linkplain Class#getSimpleName() simple names}
* of the parameter types.
* <p>For example, if the supplied method is a reference to
* {@link String#getBytes(java.nio.charset.Charset)}, this function will
* return {@code "getBytes(Charset)"}.
*/
public static final Function<Method, String> DEFAULT_METHOD_STYLER = SimpleValueStyler::toSimpleMethodSignature;
	// Styling strategy applied to Class values (defaults to DEFAULT_CLASS_STYLER).
	private final Function<Class<?>, String> classStyler;
	// Styling strategy applied to Method values (defaults to DEFAULT_METHOD_STYLER).
	private final Function<Method, String> methodStyler;
	/**
	 * Create a {@code SimpleValueStyler} using the {@link #DEFAULT_CLASS_STYLER}
	 * and {@link #DEFAULT_METHOD_STYLER}.
	 */
	public SimpleValueStyler() {
		this(DEFAULT_CLASS_STYLER, DEFAULT_METHOD_STYLER);
	}
/**
* Create a {@code SimpleValueStyler} using the supplied
|
SimpleValueStyler
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/language/spel/SpringSimpleMultiLineExpressionTest.java
|
{
"start": 1148,
"end": 1654
}
|
class ____ extends SpringTestSupport {
    @Override
    protected AbstractXmlApplicationContext createApplicationContext() {
        // The route under test is defined in this Spring XML descriptor.
        String descriptor = "org/apache/camel/language/SpringSimpleMultiLineExpressionTest.xml";
        return new ClassPathXmlApplicationContext(descriptor);
    }
    @Test
    public void testSimpleMultiLineExpression() {
        // The multi-line expression in the route should reduce the exchange to "correct".
        String reply = template.requestBodyAndHeader("direct:start", "Camel", "h", "some text", String.class);
        assertEquals("correct", reply);
    }
}
|
SpringSimpleMultiLineExpressionTest
|
java
|
apache__flink
|
flink-table/flink-table-api-java-bridge/src/main/java/org/apache/flink/connector/datagen/table/RandomGeneratorVisitor.java
|
{
"start": 3394,
"end": 21837
}
|
/**
 * Builds random {@code DataGenerator}s for every supported logical type, wiring in the
 * per-field connector options ({@code min}, {@code max}, {@code max-past}, {@code null-rate},
 * {@code var-len} and {@code length}) declared under {@code fields.<name>.*}.
 */
class ____ extends DataGenVisitorBase {
    public static final int RANDOM_STRING_LENGTH_DEFAULT = 100;
    public static final int RANDOM_BYTES_LENGTH_DEFAULT = 100;
    public static final int RANDOM_COLLECTION_LENGTH_DEFAULT = 3;
    private static final float NULL_RATE_DEFAULT = 0f;
    // Option builders for "fields.<name>.<suffix>"; each visit resolves the
    // concrete typed option it needs from these shared key prefixes.
    private final ConfigOptions.OptionBuilder minKey;
    private final ConfigOptions.OptionBuilder maxKey;
    private final ConfigOptions.OptionBuilder maxPastKey;
    private final ConfigOptions.OptionBuilder nullRate;
    private final ConfigOptions.OptionBuilder varLen;
    /**
     * Creates a visitor for the given field.
     *
     * @param name the field name, also used to derive option keys for nested element fields
     * @param config the connector configuration the per-field options are read from
     */
    public RandomGeneratorVisitor(String name, ReadableConfig config) {
        super(name, config);
        this.minKey =
                key(
                        DataGenConnectorOptionsUtil.FIELDS
                                + "."
                                + name
                                + "."
                                + DataGenConnectorOptionsUtil.MIN);
        this.maxKey =
                key(
                        DataGenConnectorOptionsUtil.FIELDS
                                + "."
                                + name
                                + "."
                                + DataGenConnectorOptionsUtil.MAX);
        this.maxPastKey =
                key(
                        DataGenConnectorOptionsUtil.FIELDS
                                + "."
                                + name
                                + "."
                                + DataGenConnectorOptionsUtil.MAX_PAST);
        this.nullRate =
                key(
                        DataGenConnectorOptionsUtil.FIELDS
                                + "."
                                + name
                                + "."
                                + DataGenConnectorOptionsUtil.NULL_RATE);
        this.varLen =
                key(
                        DataGenConnectorOptionsUtil.FIELDS
                                + "."
                                + name
                                + "."
                                + DataGenConnectorOptionsUtil.VAR_LEN);
    }
    @Override
    public DataGeneratorContainer visit(BooleanType booleanType) {
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                RandomGenerator.booleanGenerator().withNullRate(config.get(nr)), nr);
    }
    @Override
    public DataGeneratorContainer visit(CharType charType) {
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                getRandomStringGenerator(charType.getLength()).withNullRate(config.get(nr)), nr);
    }
    @Override
    public DataGeneratorContainer visit(VarCharType varCharType) {
        ConfigOption<Integer> lenOption = getLengthOption(varCharType::getLength);
        // An unbounded VARCHAR falls back to a fixed default length.
        int length =
                config.get(lenOption) == VarCharType.MAX_LENGTH
                        ? RANDOM_STRING_LENGTH_DEFAULT
                        : config.get(lenOption);
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        ConfigOption<Boolean> varLenOption = varLen.booleanType().defaultValue(false);
        return DataGeneratorContainer.of(
                getRandomStringGenerator(length)
                        .withNullRate(config.get(nr))
                        .withVarLen(config.get(varLenOption)),
                lenOption,
                nr,
                varLenOption);
    }
    @Override
    public DataGeneratorContainer visit(BinaryType binaryType) {
        return DataGeneratorContainer.of(getRandomBytesGenerator(binaryType.getLength()));
    }
    @Override
    public DataGeneratorContainer visit(VarBinaryType varBinaryType) {
        ConfigOption<Integer> lenOption = getLengthOption(varBinaryType::getLength);
        ConfigOption<Boolean> varLenOption = varLen.booleanType().defaultValue(false);
        // An unbounded VARBINARY falls back to a fixed default length.
        int length =
                config.get(lenOption) == VarBinaryType.MAX_LENGTH
                        ? RANDOM_BYTES_LENGTH_DEFAULT
                        : config.get(lenOption);
        return DataGeneratorContainer.of(
                getRandomBytesGenerator(length).withVarLen(config.get(varLenOption)),
                lenOption,
                varLenOption);
    }
    @Override
    public DataGeneratorContainer visit(TinyIntType tinyIntType) {
        ConfigOption<Integer> min = minKey.intType().defaultValue((int) Byte.MIN_VALUE);
        ConfigOption<Integer> max = maxKey.intType().defaultValue((int) Byte.MAX_VALUE);
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                RandomGenerator.byteGenerator(
                                config.get(min).byteValue(), config.get(max).byteValue())
                        .withNullRate(config.get(nr)),
                min,
                max,
                nr);
    }
    @Override
    public DataGeneratorContainer visit(SmallIntType smallIntType) {
        ConfigOption<Integer> min = minKey.intType().defaultValue((int) Short.MIN_VALUE);
        ConfigOption<Integer> max = maxKey.intType().defaultValue((int) Short.MAX_VALUE);
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                RandomGenerator.shortGenerator(
                                config.get(min).shortValue(), config.get(max).shortValue())
                        .withNullRate(config.get(nr)),
                min,
                max,
                nr);
    }
    @Override
    public DataGeneratorContainer visit(IntType integerType) {
        ConfigOption<Integer> min = minKey.intType().defaultValue(Integer.MIN_VALUE);
        ConfigOption<Integer> max = maxKey.intType().defaultValue(Integer.MAX_VALUE);
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                RandomGenerator.intGenerator(config.get(min), config.get(max))
                        .withNullRate(config.get(nr)),
                min,
                max,
                nr);
    }
    @Override
    public DataGeneratorContainer visit(BigIntType bigIntType) {
        ConfigOption<Long> min = minKey.longType().defaultValue(Long.MIN_VALUE);
        ConfigOption<Long> max = maxKey.longType().defaultValue(Long.MAX_VALUE);
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                RandomGenerator.longGenerator(config.get(min), config.get(max))
                        .withNullRate(config.get(nr)),
                min,
                max,
                nr);
    }
    @Override
    public DataGeneratorContainer visit(FloatType floatType) {
        ConfigOption<Float> min = minKey.floatType().defaultValue(Float.MIN_VALUE);
        ConfigOption<Float> max = maxKey.floatType().defaultValue(Float.MAX_VALUE);
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                RandomGenerator.floatGenerator(config.get(min), config.get(max))
                        .withNullRate(config.get(nr)),
                min,
                max,
                nr);
    }
    @Override
    public DataGeneratorContainer visit(DoubleType doubleType) {
        ConfigOption<Double> min = minKey.doubleType().defaultValue(Double.MIN_VALUE);
        ConfigOption<Double> max = maxKey.doubleType().defaultValue(Double.MAX_VALUE);
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                RandomGenerator.doubleGenerator(config.get(min), config.get(max))
                        .withNullRate(config.get(nr)),
                min,
                max,
                nr);
    }
    @Override
    public DataGeneratorContainer visit(DecimalType decimalType) {
        ConfigOption<Double> min = minKey.doubleType().defaultValue(Double.MIN_VALUE);
        ConfigOption<Double> max = maxKey.doubleType().defaultValue(Double.MAX_VALUE);
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                new DecimalDataRandomGenerator(
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        config.get(min),
                        config.get(max),
                        config.get(nr)),
                min,
                max,
                nr);
    }
    @Override
    public DataGeneratorContainer visit(YearMonthIntervalType yearMonthIntervalType) {
        ConfigOption<Integer> min = minKey.intType().defaultValue(0);
        ConfigOption<Integer> max = maxKey.intType().defaultValue(120000); // Period max
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                RandomGenerator.intGenerator(config.get(min), config.get(max))
                        .withNullRate(config.get(nr)),
                min,
                max,
                nr);
    }
    @Override
    public DataGeneratorContainer visit(DayTimeIntervalType dayTimeIntervalType) {
        ConfigOption<Long> min = minKey.longType().defaultValue(Long.MIN_VALUE);
        ConfigOption<Long> max = maxKey.longType().defaultValue(Long.MAX_VALUE);
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                RandomGenerator.longGenerator(config.get(min), config.get(max))
                        .withNullRate(config.get(nr)),
                min,
                max,
                nr);
    }
    @Override
    public DataGeneratorContainer visit(TimestampType timestampType) {
        ConfigOption<Duration> maxPastOption =
                maxPastKey.durationType().defaultValue(Duration.ZERO);
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                getRandomPastTimestampGenerator(config.get(maxPastOption))
                        .withNullRate(config.get(nr)),
                maxPastOption,
                nr);
    }
    @Override
    public DataGeneratorContainer visit(ZonedTimestampType zonedTimestampType) {
        ConfigOption<Duration> maxPastOption =
                maxPastKey.durationType().defaultValue(Duration.ZERO);
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                getRandomPastTimestampGenerator(config.get(maxPastOption))
                        .withNullRate(config.get(nr)),
                maxPastOption,
                nr);
    }
    @Override
    public DataGeneratorContainer visit(LocalZonedTimestampType localZonedTimestampType) {
        ConfigOption<Duration> maxPastOption =
                maxPastKey.durationType().defaultValue(Duration.ZERO);
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        return DataGeneratorContainer.of(
                getRandomPastTimestampGenerator(config.get(maxPastOption))
                        .withNullRate(config.get(nr)),
                maxPastOption,
                nr);
    }
    @Override
    public DataGeneratorContainer visit(ArrayType arrayType) {
        ConfigOption<Integer> lenOption = getLengthOption(() -> RANDOM_COLLECTION_LENGTH_DEFAULT);
        // Element values are produced by a child visitor keyed on "<name>.element".
        String fieldName = name + "." + "element";
        DataGeneratorContainer container =
                arrayType.getElementType().accept(new RandomGeneratorVisitor(fieldName, config));
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        DataGenerator<Object[]> generator =
                RandomGenerator.arrayGenerator(container.getGenerator(), config.get(lenOption));
        Set<ConfigOption<?>> options = container.getOptions();
        options.add(nr);
        options.add(lenOption);
        return DataGeneratorContainer.of(
                new DataGeneratorMapper<>(generator, (GenericArrayData::new), config.get(nr)),
                options.toArray(new ConfigOption<?>[0]));
    }
    @Override
    public DataGeneratorContainer visit(MultisetType multisetType) {
        ConfigOption<Integer> lenOption = getLengthOption(() -> RANDOM_COLLECTION_LENGTH_DEFAULT);
        String fieldName = name + "." + "element";
        DataGeneratorContainer container =
                multisetType.getElementType().accept(new RandomGeneratorVisitor(fieldName, config));
        // A multiset is modelled as element -> multiplicity; multiplicities are in [0, 10].
        DataGenerator<Map<Object, Integer>> mapGenerator =
                RandomGenerator.mapGenerator(
                        container.getGenerator(),
                        RandomGenerator.intGenerator(0, 10),
                        config.get(lenOption));
        Set<ConfigOption<?>> options = container.getOptions();
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        options.add(nr);
        options.add(lenOption);
        return DataGeneratorContainer.of(
                new DataGeneratorMapper<>(mapGenerator, GenericMapData::new, config.get(nr)),
                options.toArray(new ConfigOption<?>[0]));
    }
    @Override
    public DataGeneratorContainer visit(MapType mapType) {
        ConfigOption<Integer> lenOption = getLengthOption(() -> RANDOM_COLLECTION_LENGTH_DEFAULT);
        // Keys and values each get their own child visitor and option namespace.
        String keyName = name + "." + "key";
        String valName = name + "." + "value";
        DataGeneratorContainer keyContainer =
                mapType.getKeyType().accept(new RandomGeneratorVisitor(keyName, config));
        DataGeneratorContainer valContainer =
                mapType.getValueType().accept(new RandomGeneratorVisitor(valName, config));
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        Set<ConfigOption<?>> options = keyContainer.getOptions();
        options.addAll(valContainer.getOptions());
        options.add(nr);
        options.add(lenOption);
        DataGenerator<Map<Object, Object>> mapGenerator =
                RandomGenerator.mapGenerator(
                        keyContainer.getGenerator(),
                        valContainer.getGenerator(),
                        config.get(lenOption));
        return DataGeneratorContainer.of(
                new DataGeneratorMapper<>(mapGenerator, GenericMapData::new, config.get(nr)),
                options.toArray(new ConfigOption<?>[0]));
    }
    @Override
    public DataGeneratorContainer visit(RowType rowType) {
        // Each row field gets its own child visitor keyed on "<name>.<field>".
        List<DataGeneratorContainer> fieldContainers =
                rowType.getFields().stream()
                        .map(
                                field -> {
                                    String fieldName = name + "." + field.getName();
                                    return field.getType()
                                            .accept(new RandomGeneratorVisitor(fieldName, config));
                                })
                        .collect(Collectors.toList());
        List<ConfigOption<?>> fieldOptions =
                fieldContainers.stream()
                        .flatMap(container -> container.getOptions().stream())
                        .collect(Collectors.toList());
        ConfigOption<Float> nr = nullRate.floatType().defaultValue(NULL_RATE_DEFAULT);
        fieldOptions.add(nr);
        DataGenerator[] generators =
                fieldContainers.stream()
                        .map(DataGeneratorContainer::getGenerator)
                        .toArray(DataGenerator[]::new);
        return DataGeneratorContainer.of(
                new RowDataGenerator(generators, rowType.getFieldNames(), config.get(nr)),
                fieldOptions.toArray(new ConfigOption[0]));
    }
    @Override
    protected DataGeneratorContainer defaultMethod(LogicalType logicalType) {
        throw new ValidationException("Unsupported type: " + logicalType);
    }
    /** Builds the "fields.<name>.length" option with the given default. */
    private ConfigOption<Integer> getLengthOption(Supplier<Integer> defaultLengthSupplier) {
        return key(String.join(
                        ".",
                        DataGenConnectorOptionsUtil.FIELDS,
                        name,
                        DataGenConnectorOptionsUtil.LENGTH))
                .intType()
                .defaultValue(defaultLengthSupplier.get());
    }
    /** Random hex-string generator of (at most) the given length, honouring null-rate/var-len. */
    private static RandomGenerator<StringData> getRandomStringGenerator(int length) {
        return new RandomGenerator<StringData>() {
            @Override
            public StringData next() {
                if (nullRate == NULL_RATE_DEFAULT
                        || ThreadLocalRandom.current().nextFloat() > nullRate) {
                    int len = generateLength(length, varLen);
                    return StringData.fromString(random.nextHexString(len));
                }
                return null;
            }
        };
    }
    /** Timestamp generator drawing values up to {@code maxPast} before "now". */
    private static RandomGenerator<TimestampData> getRandomPastTimestampGenerator(
            Duration maxPast) {
        return new RandomGenerator<TimestampData>() {
            @Override
            public TimestampData next() {
                if (nullRate == NULL_RATE_DEFAULT
                        || ThreadLocalRandom.current().nextFloat() > nullRate) {
                    long maxPastMillis = maxPast.toMillis();
                    long past = maxPastMillis > 0 ? random.nextLong(0, maxPastMillis) : 0;
                    return TimestampData.fromEpochMillis(System.currentTimeMillis() - past);
                }
                return null;
            }
        };
    }
    /** Random byte-array generator; note it does not consult null-rate (only var-len). */
    private static RandomGenerator<byte[]> getRandomBytesGenerator(int length) {
        return new RandomGenerator<byte[]>() {
            @Override
            public byte[] next() {
                byte[] arr = new byte[generateLength(length, varLen)];
                random.getRandomGenerator().nextBytes(arr);
                return arr;
            }
        };
    }
    /**
     * Returns the length to generate: {@code maxLength} for fixed-length fields, or a random
     * value in {@code [1, maxLength)} when {@code varLen} is set.
     *
     * <p>Guard: {@code ThreadLocalRandom.nextInt(origin, bound)} requires {@code origin < bound},
     * so a max length of 1 (or less) cannot be randomized and is returned as-is instead of
     * throwing {@link IllegalArgumentException}.
     */
    private static int generateLength(int maxLength, boolean varLen) {
        if (varLen && maxLength > 1) {
            return ThreadLocalRandom.current().nextInt(1, maxLength);
        }
        return maxLength;
    }
}
|
RandomGeneratorVisitor
|
java
|
mapstruct__mapstruct
|
processor/src/main/java/org/mapstruct/ap/internal/model/common/FieldReference.java
|
{
"start": 328,
"end": 677
}
|
interface ____ {
    /**
     * Returns the name of the variable backing this field reference.
     *
     * @return variable name of the field
     */
    String getVariableName();
    /**
     * Returns the declared type of the referenced field.
     *
     * @return type of the field
     */
    Type getType();
    /**
     * Extra values made available to code-generation templates; the default
     * implementation supplies none.
     *
     * @return additional template parameters, or {@code null} when there are none
     */
    default Map<String, Object> getTemplateParameter() {
        return null;
    }
}
|
FieldReference
|
java
|
quarkusio__quarkus
|
extensions/micrometer/deployment/src/main/java/io/quarkus/micrometer/deployment/binder/NettyBinderProcessor.java
|
{
"start": 4105,
"end": 5969
}
|
class ____ extends AbstractSupportEnabled {
    // Injected Micrometer configuration consulted by the enablement check.
    MicrometerConfig mConfig;
    @Override
    MicrometerConfig getMicrometerConfig() {
        return mConfig;
    }
    @Override
    Class<?> getCheckClass() {
        // Enablement additionally requires this class to be present on the classpath.
        return REACTIVE_USAGE_CLASS;
    }
}
@BuildStep(onlyIf = NettySupportEnabled.class)
void createNettyNettyAllocatorMetrics(BuildProducer<AdditionalBeanBuildItem> beans) {
    // Register the Netty allocator metrics provider as an unremovable CDI bean.
    AdditionalBeanBuildItem providerBean =
            AdditionalBeanBuildItem.unremovableOf(NettyMetricsProvider.class);
    beans.produce(providerBean);
}
@BuildStep(onlyIf = VertxAllocatorSupportEnabled.class)
void createVertxNettyAllocatorMetrics(BuildProducer<AdditionalBeanBuildItem> beans) {
    // Register the Vert.x Netty allocator metrics provider as an unremovable CDI bean.
    // TODO -- VertxByteBufAllocator.DEFAULT ??
    AdditionalBeanBuildItem providerBean =
            AdditionalBeanBuildItem.unremovableOf(VertxNettyAllocatorMetricsProvider.class);
    beans.produce(providerBean);
}
@BuildStep(onlyIf = VertxEventExecutorSupportEnabled.class)
void createVertxNettyEventExecutorMetrics(BuildProducer<AdditionalBeanBuildItem> beans, Capabilities capabilities) {
    // this is the best we can do, since we cannot check for a Vertx bean, since this itself produces a bean
    if (!capabilities.isPresent(Capability.VERTX_CORE)) {
        return;
    }
    beans.produce(AdditionalBeanBuildItem.unremovableOf(VertxNettyEventExecutorMetricsProvider.class));
}
@BuildStep(onlyIf = ReactiveSupportEnabled.class)
void createReactiveNettyAllocatorMetrics(
        BuildProducer<AdditionalBeanBuildItem> beans,
        BuildProducer<ReflectiveClassBuildItem> reflectiveClasses) {
    // Expose the reactive metrics provider as a bean, and register the usage class'
    // fields for reflection.
    beans.produce(AdditionalBeanBuildItem.unremovableOf(ReactiveNettyMetricsProvider.class));
    ReflectiveClassBuildItem usageClassReflection =
            ReflectiveClassBuildItem.builder(REACTIVE_USAGE_NAME).fields().build();
    reflectiveClasses.produce(usageClassReflection);
}
}
|
ReactiveSupportEnabled
|
java
|
google__dagger
|
dagger-compiler/main/java/dagger/internal/codegen/model/Scope.java
|
{
"start": 944,
"end": 3380
}
|
class ____ {
  // Well-known annotation class names this scope model compares against.
  private static final XClassName PRODUCTION_SCOPE =
      XClassName.get("dagger.producers", "ProductionScope");
  private static final XClassName SINGLETON = XClassName.get("jakarta.inject", "Singleton");
  private static final XClassName SINGLETON_JAVAX = XClassName.get("javax.inject", "Singleton");
  private static final XClassName REUSABLE = XClassName.get("dagger", "Reusable");
  private static final XClassName SCOPE = XClassName.get("jakarta.inject", "Scope");
  private static final XClassName SCOPE_JAVAX = XClassName.get("javax.inject", "Scope");
  /**
   * Creates a {@link Scope} object from the {@link javax.inject.Scope}-annotated annotation type.
   */
  public static Scope scope(DaggerAnnotation scopeAnnotation) {
    checkArgument(isScope(scopeAnnotation));
    return new AutoValue_Scope(scopeAnnotation);
  }
  /**
   * Returns {@code true} if {@link #scopeAnnotation()} is a {@link javax.inject.Scope} annotation.
   */
  public static boolean isScope(DaggerAnnotation scopeAnnotation) {
    return isScope(scopeAnnotation.annotationTypeElement());
  }
  /**
   * Returns {@code true} if {@code scopeAnnotationType} carries either the Jakarta or the javax
   * {@code @Scope} meta-annotation.
   */
  public static boolean isScope(DaggerTypeElement scopeAnnotationType) {
    var element = scopeAnnotationType.xprocessing();
    return element.hasAnnotation(SCOPE) || element.hasAnnotation(SCOPE_JAVAX);
  }
  /** The {@link DaggerAnnotation} that represents the scope annotation. */
  public abstract DaggerAnnotation scopeAnnotation();
  /** Returns {@code true} if this scope is the {@code @Singleton} scope (Jakarta or javax). */
  public final boolean isSingleton() {
    return isScope(SINGLETON) || isScope(SINGLETON_JAVAX);
  }
  /** Returns {@code true} if this scope is the {@link dagger.Reusable @Reusable} scope. */
  public final boolean isReusable() {
    return isScope(REUSABLE);
  }
  /**
   * Returns {@code true} if this scope is the {@link
   * dagger.producers.ProductionScope @ProductionScope} scope.
   */
  public final boolean isProductionScope() {
    return isScope(PRODUCTION_SCOPE);
  }
  /** Whether this scope's annotation type is exactly {@code annotation}. */
  private boolean isScope(XClassName annotation) {
    var actual = XAnnotations.asClassName(scopeAnnotation().xprocessing());
    return actual.equals(annotation);
  }
  /** Returns a debug representation of the scope. */
  @Override
  public final String toString() {
    return scopeAnnotation().toString();
  }
}
|
Scope
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.